Swift workaround for respondsToSelector() and sizeWithFont() - iOS

I am translating the following Objective-C code into Swift:
- (void)layoutSubviews
{
CGSize valueLabelSize = CGSizeZero;
if ([self.valueLabel.text respondsToSelector:@selector(sizeWithAttributes:)])
{
valueLabelSize = [self.valueLabel.text sizeWithAttributes:@{NSFontAttributeName:self.valueLabel.font}];
}
else
{
valueLabelSize = [self.valueLabel.text sizeWithFont:self.valueLabel.font];
}
CGFloat xOffset = ceil((self.bounds.size.width - valueLabelSize.width) * 0.5);
self.valueLabel.frame = CGRectMake(xOffset, ceil(self.bounds.size.height * 0.5) - ceil(valueLabelSize.height * 0.5), valueLabelSize.width, valueLabelSize.height);
}
This code basically lays out a UILabel dynamically in the middle of the UIView, respecting the relative font size and the padding of the view. This is the code I currently have:
override func layoutSubviews()
{
var valueSize = CGSizeZero
if valueLabel.text != nil {
if ((self.valueLabel.text! as NSString).respondsToSelector(Selector("sizeWithAttributes:"))){
valueSize = (self.valueLabel.text! as NSString).sizeWithAttributes([NSFontAttributeName:self.valueLabel.font])
}
}
let xOffSet = ceil((self.bounds.size.width - valueSize.width) * 0.5)
valueLabel.frame = CGRectMake(xOffSet, ceil(self.bounds.size.height * 0.5) - ceil(valueSize.height * 0.5), valueSize.width, valueSize.height)
}
This is not working correctly because the label appears in the top-left corner of the view. I believe there is some workaround in Swift for not using the respondsToSelector() method, but I don't know how. I also want to avoid using sizeWithFont() because it's deprecated.
Any help will be very welcomed!

I finally made it work using the superview property:
override func layoutSubviews() {
super.layoutSubviews()
var valueSize = CGSizeZero
if let textValue = valueLabel.text {
valueSize = textValue.sizeWithAttributes([NSFontAttributeName:self.valueLabel.font])
}
var unitSize = CGSizeZero
if let textUnit = unitLabel.text {
unitSize = textUnit.sizeWithAttributes([NSFontAttributeName:self.unitLabel.font])
}
let xOffset = ceil((superview!.bounds.size.width - (valueSize.width + unitSize.width)) * 0.5)
valueLabel.frame = CGRectMake(xOffset, ceil(superview!.bounds.size.height * 0.5) - ceil(valueSize.height * 0.5), valueSize.width, valueSize.height)
unitLabel.frame = CGRectMake(CGRectGetMaxX(valueLabel.frame), ceil(superview!.bounds.size.height * 0.5) - ceil(unitSize.height * 0.5) + iMinistryValueInformationViewPadding, unitSize.width, unitSize.height)
}
Thanks anyway for the help!

Have you tried using label.sizeToFit() to set the correct label size to fit your content?
For example:
class CustomView : UIView {
lazy var textLabel: UILabel = {
let label = UILabel()
// Setup your label's text attributes here.
// e.g. font size
self.addSubview(label)
return label
}()
override func layoutSubviews() {
super.layoutSubviews()
// By this point the `text` for the label and all attributes
// that could affect the text size need to be set.
textLabel.sizeToFit()
textLabel.center = CGPoint(x: frame.width / 2, y: frame.height / 2)
}
}
The result:
Hope that helps!

Related

Drawing multiple rectangles using drawRect efficiently

I'm trying to draw a rectangle pattern using drawRect like this:
Currently, I'm doing this like so:
class PatternView: UIView {
override func draw(_ rect: CGRect) {
let context = UIGraphicsGetCurrentContext()
let numberOfBoxesPerRow = 7
let boxSide: CGFloat = rect.width / CGFloat(numberOfBoxesPerRow)
var yOrigin: CGFloat = 0
var xOrigin: CGFloat = 0
var isBlack = true
for y in 0...numberOfBoxesPerRow - 1 {
yOrigin = boxSide * CGFloat(y)
for x in 0...numberOfBoxesPerRow - 1 {
xOrigin = boxSide * CGFloat(x)
let color = isBlack ? UIColor.red : UIColor.blue
isBlack = !isBlack
context?.setFillColor(color.cgColor)
let rectangle = CGRect(origin: .init(x: xOrigin, y: yOrigin), size: .init(width: boxSide, height: boxSide))
context?.addRect(rectangle)
context?.fill([rectangle])
}
}
}
}
It's working but I'm trying to optimize it.
Any help will be highly appreciated!
It's difficult to answer "abstract" questions... which this one is, without knowing if you've run some tests / profiling to determine if this code is slow.
However, a couple things you can do to speed it up...
fill the view with one color (red, in this case) and then draw only the other-color boxes
add rects to the context's path, and fill the path once
Take a look at this modification:
class PatternView: UIView {
override func draw(_ rect: CGRect) {
guard let context = UIGraphicsGetCurrentContext() else { return }
let numberOfBoxesPerRow = 7
let boxSide: CGFloat = rect.width / CGFloat(numberOfBoxesPerRow)
context.setFillColor(UIColor.red.cgColor)
context.fill(bounds)
var r: CGRect = CGRect(origin: .zero, size: CGSize(width: boxSide, height: boxSide))
context.beginPath()
for row in 0..<numberOfBoxesPerRow {
r.origin.x = 0.0
for col in 0..<numberOfBoxesPerRow {
if (row % 2 == 0 && col % 2 == 1) || (row % 2 == 1 && col % 2 == 0) {
context.addRect(r)
}
r.origin.x += boxSide
}
r.origin.y += boxSide
}
context.setFillColor(UIColor.blue.cgColor)
context.fillPath()
}
}
There are other options... create a "pattern" background color... use CAShapeLayers and/or CAReplicatorLayers... for example.
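For example, the "pattern background color" option might look something like this... a minimal sketch, assuming a fixed box size (the function name and parameters are just illustrative):
import UIKit

// Render one 2 x 2 tile; UIKit repeats it automatically as a pattern.
func checkerboardColor(boxSide: CGFloat,
                       first: UIColor = .red,
                       second: UIColor = .blue) -> UIColor {
    let tileSize = CGSize(width: boxSide * 2, height: boxSide * 2)
    let tile = UIGraphicsImageRenderer(size: tileSize).image { ctx in
        first.setFill()
        ctx.fill(CGRect(origin: .zero, size: tileSize))
        second.setFill()
        ctx.fill(CGRect(x: boxSide, y: 0, width: boxSide, height: boxSide))
        ctx.fill(CGRect(x: 0, y: boxSide, width: boxSide, height: boxSide))
    }
    return UIColor(patternImage: tile)
}

// Usage:
// patternView.backgroundColor = checkerboardColor(boxSide: 40)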
Edit
The reason you are getting "blurry edges" is that, as you guessed, you're drawing on partial pixels.
If we modify the values to use whole numbers (using floor()), we can avoid that. Note that the wholeNumberBoxSide * numBoxes may then NOT be exactly equal to the view's rect, so we'll also want to inset the "grid":
class PatternView: UIView {
override func draw(_ rect: CGRect) {
guard let context = UIGraphicsGetCurrentContext() else { return }
let c1: UIColor = .white
let c2: UIColor = .lightGray
let numberOfBoxesPerRow = 7
// use a whole number
let boxSide: CGFloat = floor(rect.width / CGFloat(numberOfBoxesPerRow))
// inset because numBoxes * boxSide may not be exactly equal to rect
let inset: CGFloat = floor((rect.width - boxSide * CGFloat(numberOfBoxesPerRow)) * 0.5)
context.setFillColor(c1.cgColor)
context.fill(CGRect(x: inset, y: inset, width: boxSide * CGFloat(numberOfBoxesPerRow), height: boxSide * CGFloat(numberOfBoxesPerRow)))
var r: CGRect = CGRect(x: inset, y: inset, width: boxSide, height: boxSide)
context.beginPath()
for row in 0..<numberOfBoxesPerRow {
r.origin.x = inset
for col in 0..<numberOfBoxesPerRow {
if (row % 2 == 0 && col % 2 == 1) || (row % 2 == 1 && col % 2 == 0) {
context.addRect(r)
}
r.origin.x += boxSide
}
r.origin.y += boxSide
}
context.setFillColor(c2.cgColor)
context.fillPath()
}
}
We could also get the scale of the main screen (which will be 2x or 3x) and round the boxSide to half- or one-third points to align with the pixels... if really desired.
Edit 2
Additional modifications... settable colors and number of boxes.
Also, using this extension:
// extension to round CGFloat values to floor/nearest CGFloat
// so, for example
// if f == 10.6
// f.floor(nearest: 0.5) = 10.5
// f.floor(nearest: 0.3333) = 10.3333
// f.round(nearest: 0.5) = 10.5
// f.round(nearest: 0.3333) = 10.66666
extension CGFloat {
func round(nearest: CGFloat) -> CGFloat {
let n = 1/nearest
let numberToRound = self * n
return numberToRound.rounded() / n
}
func floor(nearest: CGFloat) -> CGFloat {
let intDiv = CGFloat(Int(self / nearest))
return intDiv * nearest
}
}
We can round the coordinates to match the screen scale.
PatternView class
class PatternView: UIView {
var c1: UIColor = .white { didSet { setNeedsDisplay() } }
var c2: UIColor = .lightGray { didSet { setNeedsDisplay() } }
var numberOfBoxesPerRow = 21 { didSet { setNeedsDisplay() } }
override func draw(_ rect: CGRect) {
guard let context = UIGraphicsGetCurrentContext() else { return }
let sc: CGFloat = 1.0 // / CGFloat(UIScreen.main.scale)
// use a whole number
let boxSide: CGFloat = (rect.width / CGFloat(numberOfBoxesPerRow)).floor(nearest: sc)
// inset because numBoxes * boxSide may not be exactly equal to rect
let inset: CGFloat = ((rect.width - boxSide * CGFloat(numberOfBoxesPerRow)) * 0.5).floor(nearest: sc)
context.setFillColor(c1.cgColor)
context.fill(CGRect(x: inset, y: inset, width: boxSide * CGFloat(numberOfBoxesPerRow), height: boxSide * CGFloat(numberOfBoxesPerRow)))
var r: CGRect = CGRect(x: inset, y: inset, width: boxSide, height: boxSide)
context.beginPath()
for row in 0..<numberOfBoxesPerRow {
r.origin.x = inset
for col in 0..<numberOfBoxesPerRow {
if (row % 2 == 0 && col % 2 == 1) || (row % 2 == 1 && col % 2 == 0) {
context.addRect(r)
}
r.origin.x += boxSide
}
r.origin.y += boxSide
}
context.setFillColor(c2.cgColor)
context.fillPath()
}
}
Example Controller View class
class PatternTestVC: UIViewController {
let pvA = PatternView()
let pvB = PatternView()
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = .systemBlue
let stack = UIStackView()
stack.axis = .vertical
stack.spacing = 8
stack.translatesAutoresizingMaskIntoConstraints = false
view.addSubview(stack)
let g = view.safeAreaLayoutGuide
NSLayoutConstraint.activate([
stack.leadingAnchor.constraint(equalTo: g.leadingAnchor, constant: 40.0),
stack.trailingAnchor.constraint(equalTo: g.trailingAnchor, constant: -40.0),
stack.centerYAnchor.constraint(equalTo: g.centerYAnchor),
])
[pvA, pvB].forEach { v in
v.backgroundColor = .red
v.numberOfBoxesPerRow = 7
v.heightAnchor.constraint(equalTo: v.widthAnchor).isActive = true
stack.addArrangedSubview(v)
}
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
pvB.numberOfBoxesPerRow += 1
}
}
Sets up two pattern views... both start at 7 boxes... each tap anywhere increments the boxes per row in the bottom view.
Here's how it looks with 21 boxes per row (actual size - so really big image):
and zoomed-in 1600%:
Note the red borders... I set the background of the view to red, so we can see that the grid must be inset to account for the non-whole-number box size.
Edit 3
Options to avoid "blurry edges" ...
Suppose we have a view width of 209 and we want 10 boxes.
That gives us a box width of 20.9 ... which results in "blurry edges" -- so we know we need to get to a whole number.
If we round it, we'll get 21 -- 21 x 10 = 210 which will exceed the width of the view. So we need to round it down (floor()).
So...
Option 1:
Option 2:
Option 3:
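For instance, with the floor-and-inset approach from the earlier edits, the numbers for that 209-point / 10-box example work out like this (a minimal sketch):
import UIKit

let viewWidth: CGFloat = 209
let numberOfBoxes = 10

// 20.9 would land on partial pixels, so round it down to a whole number
let boxSide = floor(viewWidth / CGFloat(numberOfBoxes))                    // 20.0

// 10 boxes x 20 points = 200 points, leaving 9 points of slack,
// so inset the grid to keep it centered
let inset = floor((viewWidth - boxSide * CGFloat(numberOfBoxes)) * 0.5)    // 4.0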
I think your first move would be to draw a big red square, then draw only the blue squares on top of it. That spares half the computations, even if it does not change the order of magnitude.
EDIT
Note: it is always the drawing itself that consumes time, rarely the other computations. So that is what we have to minimize.
So, my second move would be to replace drawing individual squares with creating just one combined BezierPath that merges all the squares into a single shape, and then display it only once (see the sketch below).
I do not know if it is possible to merge the whole grid into one shape, but it is at least possible to merge two columns of blue squares into one shape.
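A minimal sketch of that idea, building one UIBezierPath that contains all the blue squares and filling it with a single call (the function name is just illustrative):
import UIKit

// Accumulate every "blue" square into one path, then fill it once.
func makeBlueSquaresPath(boxSide: CGFloat, boxesPerRow: Int) -> UIBezierPath {
    let path = UIBezierPath()
    for row in 0..<boxesPerRow {
        for col in 0..<boxesPerRow where (row + col) % 2 == 1 {
            path.append(UIBezierPath(rect: CGRect(x: CGFloat(col) * boxSide,
                                                  y: CGFloat(row) * boxSide,
                                                  width: boxSide,
                                                  height: boxSide)))
        }
    }
    return path
}

// In draw(_:), after filling the whole view red:
// UIColor.blue.setFill()
// makeBlueSquaresPath(boxSide: boxSide, boxesPerRow: 7).fill()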
EDIT 2
Also, I do not understand why there are two instructions here:
context?.addRect(rectangle)
context?.fill([rectangle])
Shouldn't the second one alone be enough?

How To Scale The Contents Of A UIView To Fit A Destination Rectangle Whilst Maintaining The Aspect Ratio?

I am trying to solve a problem without success and am hoping someone could help.
I have looked for similar posts but haven't been able to find anything which solves my problem.
My Scenario is as follows:
I have a UIView on which a number of other UIViews can be placed. These can be moved, scaled and rotated using gesture recognisers (There is no issue here).
The User is able to change the Aspect Ratio of the Main View (the Canvas) and my problem is trying to scale the content of the Canvas to fit into the new destination size.
There are a number of posts with a similar theme e.g:
calculate new size and location on a CGRect
How to create an image of specific size from UIView
But these don't address the changing of ratios multiple times.
My Approach:
When I change the aspect ratio of the canvas, I make use of AVFoundation to calculate an aspect fitted rectangle which the subviews of the canvas should fit:
let sourceRectangleSize = canvas.frame.size
canvas.setAspect(aspect, screenSize: editorLayoutGuide.layoutFrame.size)
view.layoutIfNeeded()
let destinationRectangleSize = canvas.frame.size
let aspectFittedFrame = AVMakeRect(aspectRatio:sourceRectangleSize, insideRect: CGRect(origin: .zero, size: destinationRectangleSize))
ratioVisualizer.frame = aspectFittedFrame
The red frame is simply to visualise the aspect-fitted rectangle. As you can see, whilst the aspect-fitted rectangle is correct, the scaling of objects isn't working. This is especially true when I apply scale and rotation to the subviews (CanvasElement).
The logic where I am scaling the objects is clearly wrong:
@objc
private func setRatio(_ control: UISegmentedControl) {
guard let aspect = Aspect(rawValue: control.selectedSegmentIndex) else { return }
let sourceRectangleSize = canvas.frame.size
canvas.setAspect(aspect, screenSize: editorLayoutGuide.layoutFrame.size)
view.layoutIfNeeded()
let destinationRectangleSize = canvas.frame.size
let aspectFittedFrame = AVMakeRect(aspectRatio:sourceRectangleSize, insideRect: CGRect(origin: .zero, size: destinationRectangleSize))
ratioVisualizer.frame = aspectFittedFrame
let scale = min(aspectFittedFrame.size.width/canvas.frame.width, aspectFittedFrame.size.height/canvas.frame.height)
for case let canvasElement as CanvasElement in canvas.subviews {
canvasElement.frame.size = CGSize(
width: canvasElement.baseFrame.width * scale,
height: canvasElement.baseFrame.height * scale
)
canvasElement.frame.origin = CGPoint(
x: aspectFittedFrame.origin.x + canvasElement.baseFrame.origin.x * scale,
y: aspectFittedFrame.origin.y + canvasElement.baseFrame.origin.y * scale
)
}
}
I am enclosing the CanvasElement Class as well if this helps:
final class CanvasElement: UIView {
var rotation: CGFloat = 0
var baseFrame: CGRect = .zero
var id: String = UUID().uuidString
// MARK: - Initialization
override init(frame: CGRect) {
super.init(frame: frame)
storeState()
setupGesture()
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
}
// MARK: - Gesture Setup
private func setupGesture() {
let panGestureRecognizer = UIPanGestureRecognizer(target: self, action: #selector(panGesture(_:)))
let pinchGestureRecognizer = UIPinchGestureRecognizer(target: self, action: #selector(pinchGesture(_:)))
let rotateGestureRecognizer = UIRotationGestureRecognizer(target: self, action: #selector(rotateGesture(_:)))
addGestureRecognizer(panGestureRecognizer)
addGestureRecognizer(pinchGestureRecognizer)
addGestureRecognizer(rotateGestureRecognizer)
}
// MARK: - Touches
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
super.touchesBegan(touches, with: event)
moveToFront()
}
//MARK: - Gestures
@objc
private func panGesture(_ sender: UIPanGestureRecognizer) {
let move = sender.translation(in: self)
transform = transform.concatenating(.init(translationX: move.x, y: move.y))
sender.setTranslation(CGPoint.zero, in: self)
storeState()
}
@objc
private func pinchGesture(_ sender: UIPinchGestureRecognizer) {
transform = transform.scaledBy(x: sender.scale, y: sender.scale)
sender.scale = 1
storeState()
}
@objc
private func rotateGesture(_ sender: UIRotationGestureRecognizer) {
rotation += sender.rotation
transform = transform.rotated(by: sender.rotation)
sender.rotation = 0
storeState()
}
// MARK: - Miscellaneous
func moveToFront() {
superview?.bringSubviewToFront(self)
}
public func rotated(by degrees: CGFloat) {
transform = transform.rotated(by: degrees)
rotation += degrees
}
func storeState() {
print("""
Element Frame = \(frame)
Element Bounds = \(bounds)
Element Center = \(center)
""")
baseFrame = frame
}
}
Any help or advice, or approaches with some actual examples, would be great. I'm not expecting anyone to provide full source code, but something which I could use as a basis.
Thank you for taking the time to read my question.
Here are a few thoughts and findings while playing around with this
1. Is the right scale factor being used?
The scaling you use is a bit custom and cannot be compared directly to the examples, which have just one scale factor like 2 or 3. However, your scale factor has two dimensions, but I see you compensate for this by taking the minimum of the width and height scaling:
let scale = min(aspectFittedFrame.size.width / canvas.frame.width,
aspectFittedFrame.size.height / canvas.frame.height)
In my opinion, this is not the right scale factor. This compares the new aspectFittedFrame with the new canvas frame,
when I believe the right scaling factor comes from comparing the new aspectFittedFrame with the previous canvas frame:
let scale
= min(aspectFittedFrame.size.width / sourceRectangleSize.width,
aspectFittedFrame.size.height / sourceRectangleSize.height)
2. Is the scale being applied on the right values?
If you notice, the first change, from 1:1 to 16:9, works quite well. However, after that it does not seem to work, and I believe the issue is here:
for case let canvasElement as CanvasElement in strongSelf.canvas.subviews
{
canvasElement.frame.size = CGSize(
width: canvasElement.baseFrame.width * scale,
height: canvasElement.baseFrame.height * scale
)
canvasElement.frame.origin = CGPoint(
x: aspectFittedFrame.origin.x
+ canvasElement.baseFrame.origin.x * scale,
y: aspectFittedFrame.origin.y
+ canvasElement.baseFrame.origin.y * scale
)
}
The first time, the scale works well because canvas and the canvas elements are being scaled in sync or mapped properly:
However, if you go beyond that, because you are always scaling based on the base values, your aspect ratio frame and your canvas elements get out of sync.
So, in the example of 1:1 -> 16:9 -> 3:2:
your viewport has been scaled twice, 1:1 -> 16:9 and then 16:9 -> 3:2,
whereas your elements are scaled once each time, 1:1 -> 16:9 and 1:1 -> 3:2, because you always scale from the base values.
So I feel that, to keep the elements within the red viewport, you need to apply the same continuous scaling based on the previous frame rather than the base frame.
Just for an immediate quick fix, I update the base values of the canvas element after each change in canvas size by calling canvasElement.storeState():
for case let canvasElement as CanvasElement in strongSelf.canvas.subviews
{
canvasElement.frame.size = CGSize(
width: canvasElement.baseFrame.width * scale,
height: canvasElement.baseFrame.height * scale
)
canvasElement.frame.origin = CGPoint(
x: aspectFittedFrame.origin.x
+ canvasElement.baseFrame.origin.x * scale,
y: aspectFittedFrame.origin.y
+ canvasElement.baseFrame.origin.y * scale
)
// I added this
canvasElement.storeState()
}
The result is perhaps closer to what you want?
Final thoughts
While this might fix your issue, you will notice that it is not possible to come back to the original state, as a transformation is applied at each step.
A solution could be to store the current values mapped to a specific viewport aspect ratio and calculate the right sizes for the others, so that if you needed to get back to the original, you could do that (see the sketch below).
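A minimal sketch of that idea, assuming each element keeps its frame relative to a fixed "reference" canvas size and every other size is derived from it (the type and property names are just illustrative):
import UIKit

struct ReferencePlacement {
    let referenceCanvasSize: CGSize
    let referenceFrame: CGRect

    // Recompute the frame for any canvas size with one uniform scale factor,
    // matching the aspect-fit behaviour of AVMakeRect.
    func frame(for canvasSize: CGSize) -> CGRect {
        let scale = min(canvasSize.width / referenceCanvasSize.width,
                        canvasSize.height / referenceCanvasSize.height)
        // Center the scaled reference canvas inside the new canvas
        let offsetX = (canvasSize.width - referenceCanvasSize.width * scale) * 0.5
        let offsetY = (canvasSize.height - referenceCanvasSize.height * scale) * 0.5
        return CGRect(x: offsetX + referenceFrame.origin.x * scale,
                      y: offsetY + referenceFrame.origin.y * scale,
                      width: referenceFrame.width * scale,
                      height: referenceFrame.height * scale)
    }
}
Because the original reference values are never overwritten, switching back to the original aspect ratio reproduces the original layout exactly.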
Couple suggestions...
First, when using your CanvasElement, panning doesn't work correctly if the view has been rotated.
So, instead of using a translate transform to move the view, change the .center itself. In addition, when panning, we want to use the translation in the superview, not in the view itself:
@objc
func panGesture(_ gest: UIPanGestureRecognizer) {
// change the view's .center instead of applying translate transform
// use translation in superview, not in self
guard let superV = superview else { return }
let translation = gest.translation(in: superV)
center = CGPoint(x: center.x + translation.x, y: center.y + translation.y)
gest.setTranslation(CGPoint.zero, in: superV)
}
Now, when we want to scale the subviews when the "Canvas" changes size, we can do this...
We'll track the "previous" bounds and use the "new bounds" to calculate the scale:
let newBounds: CGRect = bounds
let scW: CGFloat = newBounds.size.width / prevBounds.size.width
let scH: CGFloat = newBounds.size.height / prevBounds.size.height
for case let v as CanvasElement in subviews {
// reset transform before scaling / positioning
let tr = v.transform
v.transform = .identity
let w = v.frame.width * scW
let h = v.frame.height * scH
let cx = v.center.x * scW
let cy = v.center.y * scH
v.frame.size = CGSize(width: w, height: h)
v.center = CGPoint(x: cx, y: cy)
// re-apply transform
v.transform = tr
}
prevBounds = newBounds
Here's a complete sample implementation. Please note: this is Example Code Only!!! It is not intended to be "Production Ready."
import UIKit
// MARK: enum to provide strings and aspect ratio values
enum Aspect: Int, Codable, CaseIterable {
case a1to1
case a16to9
case a3to2
case a4to3
case a9to16
var stringValue: String {
switch self {
case .a1to1:
return "1:1"
case .a16to9:
return "16:9"
case .a3to2:
return "3:2"
case .a4to3:
return "4:3"
case .a9to16:
return "9:16"
}
}
var aspect: CGFloat {
switch self {
case .a1to1:
return 1
case .a16to9:
return 9.0 / 16.0
case .a3to2:
return 2.0 / 3.0
case .a4to3:
return 3.0 / 4.0
case .a9to16:
return 16.0 / 9.0
}
}
}
class EditorView: UIView {
// no code -
// just makes it easier to identify
// this view when debugging
}
// CanvasElement views will be added as subviews
// this handles the scaling / positioning when the bounds changes
// also (optionally) draws a grid (for use during development)
class CanvasView: UIView {
public var showGrid: Bool = true
private let gridLayer: CAShapeLayer = CAShapeLayer()
private var prevBounds: CGRect = .zero
// MARK: init
override init(frame: CGRect) {
super.init(frame: frame)
commonInit()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
commonInit()
}
private func commonInit() {
gridLayer.fillColor = UIColor.clear.cgColor
gridLayer.strokeColor = UIColor.red.cgColor
gridLayer.lineWidth = 1
layer.addSublayer(gridLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
// MARK: 10 x 10 grid
if showGrid {
// draw a grid on the inside of the bounds
// so the edges are not 1/2 point width
let gridBounds: CGRect = bounds.insetBy(dx: 0.5, dy: 0.5)
let path: UIBezierPath = UIBezierPath()
let w: CGFloat = gridBounds.width / 10.0
let h: CGFloat = gridBounds.height / 10.0
var p: CGPoint = .zero
p = CGPoint(x: gridBounds.minX, y: gridBounds.minY)
for _ in 0...10 {
path.move(to: p)
path.addLine(to: CGPoint(x: p.x, y: gridBounds.maxY))
p.x += w
}
p = CGPoint(x: gridBounds.minX, y: gridBounds.minY)
for _ in 0...10 {
path.move(to: p)
path.addLine(to: CGPoint(x: gridBounds.maxX, y: p.y))
p.y += h
}
gridLayer.path = path.cgPath
}
// MARK: update subviews
// we only want to move/scale the subviews if
// the bounds has > 0 width and height and
// prevBounds has > 0 width and height and
// the bounds has changed
guard bounds != prevBounds,
bounds.width > 0, prevBounds.width > 0,
bounds.height > 0, prevBounds.height > 0
else { return }
let newBounds: CGRect = bounds
let scW: CGFloat = newBounds.size.width / prevBounds.size.width
let scH: CGFloat = newBounds.size.height / prevBounds.size.height
for case let v as CanvasElement in subviews {
// reset transform before scaling / positioning
let tr = v.transform
v.transform = .identity
let w = v.frame.width * scW
let h = v.frame.height * scH
let cx = v.center.x * scW
let cy = v.center.y * scH
v.frame.size = CGSize(width: w, height: h)
v.center = CGPoint(x: cx, y: cy)
// re-apply transform
v.transform = tr
}
prevBounds = newBounds
}
override var bounds: CGRect {
willSet {
prevBounds = bounds
}
}
}
// self-contained Pan/Pinch/Rotate view
// set allowSimultaneous to TRUE to enable
// simultaneous gestures
class CanvasElement: UIView, UIGestureRecognizerDelegate {
public var allowSimultaneous: Bool = false
// MARK: init
override init(frame: CGRect) {
super.init(frame: frame)
commonInit()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
commonInit()
}
private func commonInit() {
isUserInteractionEnabled = true
isMultipleTouchEnabled = true
let panG = UIPanGestureRecognizer(target: self, action: #selector(panGesture(_:)))
let pinchG = UIPinchGestureRecognizer(target: self, action: #selector(pinchGesture(_:)))
let rotateG = UIRotationGestureRecognizer(target: self, action: #selector(rotateGesture(_:)))
[panG, pinchG, rotateG].forEach { g in
g.delegate = self
addGestureRecognizer(g)
}
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
super.touchesBegan(touches, with: event)
// unwrap optional superview
guard let superV = superview else { return }
superV.bringSubviewToFront(self)
}
// MARK: UIGestureRecognizer Methods
@objc
func panGesture(_ gest: UIPanGestureRecognizer) {
// change the view's .center instead of applying translate transform
// use translation in superview, not in self
guard let superV = superview else { return }
let translation = gest.translation(in: superV)
center = CGPoint(x: center.x + translation.x, y: center.y + translation.y)
gest.setTranslation(CGPoint.zero, in: superV)
}
@objc
func pinchGesture(_ gest: UIPinchGestureRecognizer) {
// apply scale transform
transform = transform.scaledBy(x: gest.scale, y: gest.scale)
gest.scale = 1
}
@objc
func rotateGesture(_ gest : UIRotationGestureRecognizer) {
// apply rotate transform
transform = transform.rotated(by: gest.rotation)
gest.rotation = 0
}
// MARK: UIGestureRecognizerDelegate Methods
func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIGestureRecognizer) -> Bool {
return allowSimultaneous
}
}
// example view controller
// Aspect Ratio segmented control
// changes the Aspect Ratio of the Editor View
// includes triple-tap gesture to cycle through
// 3 "starting subview" layouts
class ViewController: UIViewController, UIGestureRecognizerDelegate {
let editorView: EditorView = {
let v = EditorView()
v.backgroundColor = UIColor(white: 0.9, alpha: 1.0)
v.translatesAutoresizingMaskIntoConstraints = false
return v
}()
let canvasView: CanvasView = {
let v = CanvasView()
v.backgroundColor = .yellow
v.translatesAutoresizingMaskIntoConstraints = false
return v
}()
// segmented control for selecting Aspect Ratio
let aspectRatioSeg: UISegmentedControl = {
let v = UISegmentedControl()
v.setContentCompressionResistancePriority(.required, for: .vertical)
v.setContentHuggingPriority(.required, for: .vertical)
v.translatesAutoresizingMaskIntoConstraints = false
return v
}()
// this will be changed by the Aspect Ratio segmented control
var evAspectConstraint: NSLayoutConstraint!
// used to cycle through intitial subviews layout
var layoutMode: Int = 0
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = UIColor(white: 0.99, alpha: 1.0)
// container view for laying out editor view
let containerView: UIView = {
let v = UIView()
v.backgroundColor = .cyan
v.translatesAutoresizingMaskIntoConstraints = false
return v
}()
// setup the aspect ratio segmented control
for (idx, m) in Aspect.allCases.enumerated() {
aspectRatioSeg.insertSegment(withTitle: m.stringValue, at: idx, animated: false)
}
// add canvas view to editor view
editorView.addSubview(canvasView)
// add editor view to container view
containerView.addSubview(editorView)
// add container view to self's view
view.addSubview(containerView)
// add UI Aspect Ratio segmented control to self's view
view.addSubview(aspectRatioSeg)
// always respect the safe area
let safeG = view.safeAreaLayoutGuide
// editor view inset from container view sides
let evInset: CGFloat = 0
// canvas view inset from editor view sides
let cvInset: CGFloat = 0
// these sets of constraints will make the Editor View and the Canvas View
// as large as their superviews (with "Inset Edge Padding" if set above)
// while maintaining aspect ratios and centering
let evMaxW = editorView.widthAnchor.constraint(lessThanOrEqualTo: containerView.widthAnchor, constant: -evInset)
let evMaxH = editorView.heightAnchor.constraint(lessThanOrEqualTo: containerView.heightAnchor, constant: -evInset)
let evW = editorView.widthAnchor.constraint(equalTo: containerView.widthAnchor)
let evH = editorView.heightAnchor.constraint(equalTo: containerView.heightAnchor)
evW.priority = .required - 1
evH.priority = .required - 1
let cvMaxW = canvasView.widthAnchor.constraint(lessThanOrEqualTo: editorView.widthAnchor, constant: -cvInset)
let cvMaxH = canvasView.heightAnchor.constraint(lessThanOrEqualTo: editorView.heightAnchor, constant: -cvInset)
let cvW = canvasView.widthAnchor.constraint(equalTo: editorView.widthAnchor)
let cvH = canvasView.heightAnchor.constraint(equalTo: editorView.heightAnchor)
cvW.priority = .required - 1
cvH.priority = .required - 1
// editor view starting aspect ratio
// this is changed by the segmented control
let editorAspect: Aspect = .a1to1
aspectRatioSeg.selectedSegmentIndex = editorAspect.rawValue
evAspectConstraint = editorView.heightAnchor.constraint(equalTo: editorView.widthAnchor, multiplier: editorAspect.aspect)
// we can set the Aspect Ratio of the CanvasView here
// it will maintain its Aspect Ratio independent of
// the Editor View's Aspect Ratio
let canvasAspect: Aspect = .a1to1
NSLayoutConstraint.activate([
containerView.topAnchor.constraint(equalTo: safeG.topAnchor),
containerView.leadingAnchor.constraint(equalTo: safeG.leadingAnchor),
containerView.trailingAnchor.constraint(equalTo: safeG.trailingAnchor),
editorView.centerXAnchor.constraint(equalTo: containerView.centerXAnchor),
editorView.centerYAnchor.constraint(equalTo: containerView.centerYAnchor),
evMaxW, evMaxH,
evW, evH,
evAspectConstraint,
canvasView.centerXAnchor.constraint(equalTo: editorView.centerXAnchor),
canvasView.centerYAnchor.constraint(equalTo: editorView.centerYAnchor),
cvMaxW, cvMaxH,
cvW, cvH,
canvasView.heightAnchor.constraint(equalTo: canvasView.widthAnchor, multiplier: canvasAspect.aspect),
aspectRatioSeg.topAnchor.constraint(equalTo: containerView.bottomAnchor, constant: 8.0),
aspectRatioSeg.bottomAnchor.constraint(equalTo: safeG.bottomAnchor, constant: -8.0),
aspectRatioSeg.centerXAnchor.constraint(equalTo: safeG.centerXAnchor),
aspectRatioSeg.widthAnchor.constraint(greaterThanOrEqualTo: safeG.widthAnchor, multiplier: 0.5),
aspectRatioSeg.widthAnchor.constraint(lessThanOrEqualTo: safeG.widthAnchor),
])
aspectRatioSeg.addTarget(self, action: #selector(aspectRatioSegmentChanged(_:)), for: .valueChanged)
// triple-tap anywhere to "reset" the 3 subviews
// cycling between starting sizes/positions
let tt = UITapGestureRecognizer(target: self, action: #selector(resetCanvas))
tt.numberOfTapsRequired = 3
tt.delaysTouchesEnded = false
view.addGestureRecognizer(tt)
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
// we don't have the frames in viewDidLoad,
// so wait until now to add the CanvasElement views
resetCanvas()
}
@objc func resetCanvas() {
canvasView.subviews.forEach { v in
v.removeFromSuperview()
}
// add 3 views to the canvas
let v1 = CanvasElement()
v1.backgroundColor = .systemYellow
let v2 = CanvasElement()
v2.backgroundColor = .systemGreen
let v3 = CanvasElement()
v3.backgroundColor = .systemBlue
// default size of subviews is 2/10ths the width of the canvas
let w: CGFloat = canvasView.bounds.width * 0.2
[v1, v2, v3].forEach { v in
v.frame = CGRect(x: 0, y: 0, width: w, height: w)
canvasView.addSubview(v)
// if we want to allow simultaneous gestures
// i.e. pan/scale/rotate all at the same time
//v.allowSimultaneous = true
}
switch (layoutMode % 3) {
case 1:
// top-left corner
// center at 1.5 times the size
// bottom-right corner
v1.frame.origin = CGPoint(x: 0, y: 0)
v2.frame.size = CGSize(width: w * 1.5, height: w * 1.5)
v2.center = CGPoint(x: canvasView.bounds.midX, y: canvasView.bounds.midY)
v3.center = CGPoint(x: canvasView.bounds.maxX - w * 0.5, y: canvasView.bounds.maxY - w * 0.5)
()
case 2:
// different sized views
v1.frame = CGRect(x: 0, y: 0, width: w * 0.5, height: w)
v2.frame.size = CGSize(width: w, height: w)
v2.center = CGPoint(x: canvasView.bounds.midX, y: canvasView.bounds.midY)
v3.frame.size = CGSize(width: w, height: w * 0.5)
v3.center = CGPoint(x: canvasView.bounds.maxX - v3.frame.width * 0.5, y: canvasView.bounds.maxY - v3.frame.height * 0.5)
()
default:
// on a "diagonal"
// starting at top-left corner
v1.frame.origin = CGPoint(x: 0, y: 0)
v2.frame.origin = CGPoint(x: w, y: w)
v3.frame.origin = CGPoint(x: w * 2, y: w * 2)
()
}
layoutMode += 1
}
@objc func aspectRatioSegmentChanged(_ sender: Any?) {
if let seg = sender as? UISegmentedControl,
let r = Aspect.init(rawValue: seg.selectedSegmentIndex)
{
evAspectConstraint.isActive = false
evAspectConstraint = editorView.heightAnchor.constraint(equalTo: editorView.widthAnchor, multiplier: r.aspect)
evAspectConstraint.isActive = true
}
}
}
Some sample screenshots...
Yellow is the Canvas view... with optional red 10x10 grid
Gray is the Editor view... this is the view that changes Aspect Ratio
Cyan is the "Container" view.... Editor view fits/centers itself
Note that the Canvas view can be set to something other than a square (1:1 ratio). For example, here it's set to 9:16 ratio -- and maintains its Aspect Ratio independent of the Editor view Aspect Ratio:
With this example controller, triple-tap anywhere to cycle through 3 "starting layouts":
Maybe you can put the three rectangles in a single view, and then keep the aspect ratio for that view.
If you are using Auto Layout and SnapKit, the constraints may look like this:
view.snp.makeConstraints { make in
make.width.height.lessThanOrEqualToSuperview()
make.centerX.centerY.equalToSuperview()
make.width.equalTo(view.snp.height)
make.width.height.equalToSuperview().priority(.high)
}
So this view will be aspect-fitted inside its superview.
Back to the children in this view: if you want to scale every child when the view's frame changes, you should add constraints too. Or you can use autoresizingMask, which may be simpler, for example:
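A minimal sketch, assuming each child's frame is already set relative to its parent (childView is just a placeholder name):
// Flexible width/height make the child scale with its parent's bounds;
// flexible margins keep its relative position.
childView.autoresizingMask = [.flexibleWidth, .flexibleHeight,
                              .flexibleLeftMargin, .flexibleRightMargin,
                              .flexibleTopMargin, .flexibleBottomMargin]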
If you don't want to use Auto Layout, maybe you can try a transform. When you transform a view, its children are transformed too.
// The scale depends on the aspect ratio of the superview.
view.transform = CGAffineTransform(scaleX: 0.5, y: 0.5)

Building a circular facepile of profile pictures in Swift: how to have the last photo tucked under the first?

I am trying to build a UIView that has a few UIImageViews arranged in a circular, overlapping manner (see image below). Let's say we have N images. Drawing out the first N - 1 is easy, just use sin/cos functions to arrange the centers of the UIImageViews around a circle. The problem is with the last image that seemingly has two z-index values! I know this is possible since kik messenger has similar group profile photos.
The best idea I have come up so far is taking the last image, split into something like "top half" and "bottom half" and assign different z-values for each. This seems doable when the image is the left-most one, but what happens if the image is the top most? In this case, I would need to split left and right instead of top and bottom.
Because of this problem, it's probably not top, left, or right, but more like a split across some imaginary axis from the center of the overall facepile through the center of the UIImageView. How would I do that?!
The code below will lay out UIImageViews in a circle.
You would need to import SDWebImage and provide some image URLs to run the code below.
import Foundation
import UIKit
import SDWebImage
class EventDetailsFacepileView: UIView {
static let dimension: CGFloat = 66.0
static let radius: CGFloat = dimension / 1.68
private var profilePicViews: [UIImageView] = []
var profilePicURLs: [URL] = [] {
didSet {
updateView()
}
}
func updateView() {
self.profilePicViews = profilePicURLs.map({ (profilePic) -> UIImageView in
let imageView = UIImageView()
imageView.sd_setImage(with: profilePic)
imageView.roundImage(imageDimension: EventDetailsFacepileView.dimension, showsBorder: true)
imageView.sd_imageTransition = .fade
return imageView
})
self.profilePicViews.forEach { (imageView) in
self.addSubview(imageView)
}
self.setNeedsLayout()
self.layer.borderColor = UIColor.green.cgColor
self.layer.borderWidth = 2
}
override func layoutSubviews() {
super.layoutSubviews()
let xOffset: CGFloat = 0
let yOffset: CGFloat = 0
let center = CGPoint(x: self.bounds.size.width / 2, y: self.bounds.size.height / 2)
let radius: CGFloat = EventDetailsFacepileView.radius
let angleStep: CGFloat = 2 * CGFloat(Double.pi) / CGFloat(profilePicViews.count)
var count = 0
for profilePicView in profilePicViews {
let xPos = center.x + CGFloat(cosf(Float(angleStep) * Float(count))) * (radius - xOffset)
let yPos = center.y + CGFloat(sinf(Float(angleStep) * Float(count))) * (radius - yOffset)
profilePicView.frame = CGRect(origin: CGPoint(x: xPos, y: yPos),
size: CGSize(width: EventDetailsFacepileView.dimension, height: EventDetailsFacepileView.dimension))
count += 1
}
}
override func sizeThatFits(_ size: CGSize) -> CGSize {
let requiredSize = EventDetailsFacepileView.dimension + EventDetailsFacepileView.radius
return CGSize(width: requiredSize,
height: requiredSize)
}
}
I don't think you'll have much success trying to split images to get over/under z-indexes.
One approach is to use masks to make it appear that the image views are overlapped.
The general idea would be:
subclass UIImageView
in layoutSubviews()
apply cornerRadius to layer to make the image round
get a rect from the "overlapping view"
convert that rect to local coordinates
expand that rect by the desired width of the "outline"
get an oval path from that rect
combine it with a path from self
apply it as a mask layer
Here is an example....
I was not entirely sure what your sizing calculations were doing... trying to use your EventDetailsFacepileView as-is gave me small images in the lower-right corner of the view?
So, I modified your EventDetailsFacepileView in a couple ways:
uses local images named "pro1" through "pro5" (you should be able to replace with your SDWebImage)
uses auto-layout constraints instead of explicit frames
uses MyOverlapImageView class to handle the masking
Code - no @IBOutlet connections, so just set a blank view controller to OverlapTestViewController:
class OverlapTestViewController: UIViewController {
let facePileView = MyFacePileView()
override func viewDidLoad() {
super.viewDidLoad()
facePileView.translatesAutoresizingMaskIntoConstraints = false
view.addSubview(facePileView)
facePileView.dimension = 120
let sz = facePileView.sizeThatFits(.zero)
let g = view.safeAreaLayoutGuide
NSLayoutConstraint.activate([
facePileView.widthAnchor.constraint(equalToConstant: sz.width),
facePileView.heightAnchor.constraint(equalTo: facePileView.widthAnchor),
facePileView.centerXAnchor.constraint(equalTo: g.centerXAnchor),
facePileView.centerYAnchor.constraint(equalTo: g.centerYAnchor),
])
facePileView.profilePicNames = [
"pro1", "pro2", "pro3", "pro4", "pro5"
]
}
}
class MyFacePileView: UIView {
var dimension: CGFloat = 66.0
lazy var radius: CGFloat = dimension / 1.68
private var profilePicViews: [MyOverlapImageView] = []
var profilePicNames: [String] = [] {
didSet {
updateView()
}
}
func updateView() {
self.profilePicViews = profilePicNames.map({ (profilePic) -> MyOverlapImageView in
let imageView = MyOverlapImageView()
if let img = UIImage(named: profilePic) {
imageView.image = img
}
return imageView
})
// add MyOverlapImageViews to self
// and set width / height constraints
self.profilePicViews.forEach { (imageView) in
self.addSubview(imageView)
imageView.translatesAutoresizingMaskIntoConstraints = false
imageView.widthAnchor.constraint(equalToConstant: dimension).isActive = true
imageView.heightAnchor.constraint(equalTo: imageView.widthAnchor).isActive = true
}
// start at "12 o'clock"
var curAngle: CGFloat = .pi * 1.5
// angle increment
let incAngle: CGFloat = ( 360.0 / CGFloat(self.profilePicViews.count) ) * .pi / 180.0
// calculate position for each image view
// set center constraints
self.profilePicViews.forEach { imgView in
let xPos = cos(curAngle) * radius
let yPos = sin(curAngle) * radius
imgView.centerXAnchor.constraint(equalTo: centerXAnchor, constant: xPos).isActive = true
imgView.centerYAnchor.constraint(equalTo: centerYAnchor, constant: yPos).isActive = true
curAngle += incAngle
}
// set "overlapView" property for each image view
let n = self.profilePicViews.count
for i in (1..<n).reversed() {
self.profilePicViews[i].overlapView = self.profilePicViews[i-1]
}
self.profilePicViews[0].overlapView = self.profilePicViews[n - 1]
self.layer.borderColor = UIColor.green.cgColor
self.layer.borderWidth = 2
}
override func sizeThatFits(_ size: CGSize) -> CGSize {
let requiredSize = dimension * 2.0 + radius / 2.0
return CGSize(width: requiredSize,
height: requiredSize)
}
}
class MyOverlapImageView: UIImageView {
// reference to the view that is overlapping me
weak var overlapView: MyOverlapImageView?
// width of "outline"
var outlineWidth: CGFloat = 6
override func layoutSubviews() {
super.layoutSubviews()
// make image round
layer.cornerRadius = bounds.size.width * 0.5
layer.masksToBounds = true
let mask = CAShapeLayer()
if let v = overlapView {
// get bounds from overlapView
// converted to self
// inset by outlineWidth (negative numbers will make it grow)
let maskRect = v.convert(v.bounds, to: self).insetBy(dx: -outlineWidth, dy: -outlineWidth)
// oval path from mask rect
let path = UIBezierPath(ovalIn: maskRect)
// path from self bounds
let clipPath = UIBezierPath(rect: bounds)
// append paths
clipPath.append(path)
mask.path = clipPath.cgPath
mask.fillRule = .evenOdd
// apply mask
layer.mask = mask
}
}
}
Result:
(I grabbed random images by searching google for sample profile pictures)

Custom UIView: animate subLayers with delay

I want to create a custom UIView subclass representing a bunch of stars on a dark-blue sky.
Therefore, I created this view:
import UIKit
class ConstellationView: UIView {
// MARK: - Properties
@IBInspectable var numberOfStars: Int = 80
@IBInspectable var animated: Bool = false
// Private properties
private var starsToDraw = [CAShapeLayer]()
// Layers
private let starsLayer = CAShapeLayer()
// MARK: - Drawing
// override func drawRect(rect: CGRect) {
override func layoutSubviews() {
// Generate stars
drawStars(rect: self.bounds)
}
/// Generate stars
func drawStars(rect: CGRect) {
let width = rect.size.width
let height = rect.size.height
let screenBounds = UIScreen.main.bounds
// Create the stars and store them in starsToDraw array
for _ in 0 ..< numberOfStars {
let x = randomFloat() * width
let y = randomFloat() * height
// Calculate the thinness of the stars as a percentage of the screen resolution
let thin: CGFloat = max(screenBounds.width, screenBounds.height) * 0.003 * randomFloat()
let starLayer = CAShapeLayer()
starLayer.path = UIBezierPath(ovalIn: CGRect(x: x, y: y, width: thin, height: thin)).cgPath
starLayer.fillColor = UIColor.white.cgColor
starsToDraw.append(starLayer)
}
// Define a fade animation
let appearAnimation = CABasicAnimation(keyPath: "opacity")
appearAnimation.fromValue = 0.2
appearAnimation.toValue = 1
appearAnimation.duration = 1
appearAnimation.fillMode = kCAFillModeForwards
// Add the animation to each star (if animated)
for (index, star) in starsToDraw.enumerated() {
if animated {
// Add 1 s between each animation
appearAnimation.beginTime = CACurrentMediaTime() + TimeInterval(index)
star.add(appearAnimation, forKey: nil)
}
starsLayer.insertSublayer(star, at: 0)
}
// Add the stars layer to the view layer
layer.insertSublayer(starsLayer, at: 0)
}
private func randomFloat() -> CGFloat {
return CGFloat(arc4random()) / CGFloat(UINT32_MAX)
}
}
It works quite well, here is the result:
However, I'd like to have it animated, that is, each one of the 80 stars should appear one after the other, with a 1 second delay.
I tried to increase the beginTime of my animation, but it does not seem to do the trick.
I checked with drawRect or layoutSubviews, but there is no difference.
Could you help me?
Thanks
PS: to reproduce my app, just create a new single view app in Xcode, create a new file with this code, and set the ViewController's view as a ConstellationView, with a dark background color. Also set the animated property to true, either in Interface Builder or in the code.
PPS: this is in Swift 3, but I think it's still comprehensible :-)
You're really close, only two things to do!
First, you need to specify the key when you add the animation to the layer.
star.add(appearAnimation, forKey: "opacity")
Second, the fill mode for the animation needs to be kCAFillModeBackwards instead of kCAFillModeForwards.
For a more detailed reference see - https://developer.apple.com/library/content/documentation/Cocoa/Conceptual/CoreAnimation_guide/AdvancedAnimationTricks/AdvancedAnimationTricks.html
And here's a fun tutorial (for practice with CAAnimations!) - https://www.raywenderlich.com/102590/how-to-create-a-complex-loading-animation-in-swift
Hope this helps 😀
Full Code:
class ConstellationView: UIView {
// MARK: - Properties
@IBInspectable var numberOfStars: Int = 80
@IBInspectable var animated: Bool = true
// Private properties
private var starsToDraw = [CAShapeLayer]()
// Layers
private let starsLayer = CAShapeLayer()
override func awakeFromNib() {
super.awakeFromNib()
}
// MARK: - Drawing
override func layoutSubviews() {
// Generate stars
drawStars(rect: self.bounds)
}
/// Generate stars
func drawStars(rect: CGRect) {
let width = rect.size.width
let height = rect.size.height
let screenBounds = UIScreen.main.bounds
// Create the stars and store them in starsToDraw array
for _ in 0 ..< numberOfStars {
let x = randomFloat() * width
let y = randomFloat() * height
// Calculate the thinness of the stars as a percentage of the screen resolution
let thin: CGFloat = max(screenBounds.width, screenBounds.height) * 0.003 * randomFloat()
let starLayer = CAShapeLayer()
starLayer.path = UIBezierPath(ovalIn: CGRect(x: x, y: y, width: thin, height: thin)).cgPath
starLayer.fillColor = UIColor.white.cgColor
starsToDraw.append(starLayer)
}
// Define a fade animation
let appearAnimation = CABasicAnimation(keyPath: "opacity")
appearAnimation.fromValue = 0.2
appearAnimation.toValue = 1
appearAnimation.duration = 1
appearAnimation.fillMode = kCAFillModeBackwards
// Add the animation to each star (if animated)
for (index, star) in starsToDraw.enumerated() {
if animated {
// Add 1 s between each animation
appearAnimation.beginTime = CACurrentMediaTime() + TimeInterval(index)
star.add(appearAnimation, forKey: "opacity")
}
starsLayer.insertSublayer(star, above: nil)
}
// Add the stars layer to the view layer
layer.insertSublayer(starsLayer, above: nil)
}
private func randomFloat() -> CGFloat {
return CGFloat(arc4random()) / CGFloat(UINT32_MAX)
}
}

UIImage aspect fit and align to top

It looks like aspect fit aligns the image to the bottom of the frame by default. Is there a way to override the alignment while keeping aspect fit intact?
** EDIT **
This question predates auto layout. In fact, auto layout was being revealed in WWDC 2012 the same week this question was asked
In short, you cannot do this with a UIImageView.
One solution is to subclass a UIView containing a UIImageView and change its frame according to the image size. For example, you can find one version here.
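A minimal sketch of that approach, assuming the image should be aspect-fitted and pinned to the top edge of the container (the class name is just illustrative):
import UIKit

// Container that aspect-fits its image and aligns the fitted rect to the top.
class TopAlignedImageContainer: UIView {
    private let imageView = UIImageView()

    var image: UIImage? {
        didSet {
            imageView.image = image
            setNeedsLayout()
        }
    }

    override init(frame: CGRect) {
        super.init(frame: frame)
        commonInit()
    }

    required init?(coder: NSCoder) {
        super.init(coder: coder)
        commonInit()
    }

    private func commonInit() {
        imageView.contentMode = .scaleToFill   // we do the fitting ourselves
        addSubview(imageView)
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        guard let size = image?.size, size.width > 0, size.height > 0,
              bounds.width > 0, bounds.height > 0 else { return }
        // Aspect-fit scale, then pin the fitted rect to the top, centered horizontally
        let scale = min(bounds.width / size.width, bounds.height / size.height)
        let fitted = CGSize(width: size.width * scale, height: size.height * scale)
        imageView.frame = CGRect(x: (bounds.width - fitted.width) * 0.5,
                                 y: 0,
                                 width: fitted.width,
                                 height: fitted.height)
    }
}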
Set the UIImageView's bottom layout constraint priority to lowest (i.e. 250) and it will handle it for you.
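In code that might look like this (a sketch; imageView, container, and the other edge constraints are assumed to exist already):
let bottomConstraint = imageView.bottomAnchor.constraint(equalTo: container.bottomAnchor)
// .defaultLow is priority 250, so the bottom edge gives way and the image hugs the top
bottomConstraint.priority = .defaultLow
bottomConstraint.isActive = true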
The way to do this is to modify the contentsRect of the UIImageView's layer. The following code from my project (a subclass of UIImageView) assumes scaleToFill and offsets the image so that it aligns top, bottom, left or right instead of the default center alignment. For aspectFit it would be a similar solution.
typedef NS_OPTIONS(NSUInteger, AHTImageAlignmentMode) {
AHTImageAlignmentModeCenter = 0,
AHTImageAlignmentModeLeft = 1 << 0,
AHTImageAlignmentModeRight = 1 << 1,
AHTImageAlignmentModeTop = 1 << 2,
AHTImageAlignmentModeBottom = 1 << 3,
AHTImageAlignmentModeDefault = AHTImageAlignmentModeCenter,
};
- (void)updateImageViewContentsRect {
CGRect imageViewContentsRect = CGRectMake(0, 0, 1, 1);
if (self.image.size.height > 0 && self.bounds.size.height > 0) {
CGRect imageViewBounds = self.bounds;
CGSize imageSize = self.image.size;
CGFloat imageViewFactor = imageViewBounds.size.width / imageViewBounds.size.height;
CGFloat imageFactor = imageSize.width / imageSize.height;
if (imageFactor > imageViewFactor) {
//Image is wider than the view, so height will match
CGFloat scaledImageWidth = imageViewBounds.size.height * imageFactor;
CGFloat xOffset = 0.0;
if (BM_CONTAINS_BIT(self.alignmentMode, AHTImageAlignmentModeLeft)) {
xOffset = -(scaledImageWidth - imageViewBounds.size.width) / 2;
} else if (BM_CONTAINS_BIT(self.alignmentMode, AHTImageAlignmentModeRight)) {
xOffset = (scaledImageWidth - imageViewBounds.size.width) / 2;
}
imageViewContentsRect.origin.x = (xOffset / scaledImageWidth);
} else if (imageFactor < imageViewFactor) {
CGFloat scaledImageHeight = imageViewBounds.size.width / imageFactor;
CGFloat yOffset = 0.0;
if (BM_CONTAINS_BIT(self.alignmentMode, AHTImageAlignmentModeTop)) {
yOffset = -(scaledImageHeight - imageViewBounds.size.height) / 2;
} else if (BM_CONTAINS_BIT(self.alignmentMode, AHTImageAlignmentModeBottom)) {
yOffset = (scaledImageHeight - imageViewBounds.size.height) / 2;
}
imageViewContentsRect.origin.y = (yOffset / scaledImageHeight);
}
}
self.layer.contentsRect = imageViewContentsRect;
}
Swift version
class AlignmentImageView: UIImageView {
enum HorizontalAlignment {
case left, center, right
}
enum VerticalAlignment {
case top, center, bottom
}
// MARK: Properties
var horizontalAlignment: HorizontalAlignment = .center
var verticalAlignment: VerticalAlignment = .center
// MARK: Overrides
override var image: UIImage? {
didSet {
updateContentsRect()
}
}
override func layoutSubviews() {
super.layoutSubviews()
updateContentsRect()
}
// MARK: Content layout
private func updateContentsRect() {
var contentsRect = CGRect(origin: .zero, size: CGSize(width: 1, height: 1))
guard let imageSize = image?.size else {
layer.contentsRect = contentsRect
return
}
let viewBounds = bounds
let imageViewFactor = viewBounds.size.width / viewBounds.size.height
let imageFactor = imageSize.width / imageSize.height
if imageFactor > imageViewFactor {
// Image is wider than the view, so height will match
let scaledImageWidth = viewBounds.size.height * imageFactor
var xOffset: CGFloat = 0.0
if case .left = horizontalAlignment {
xOffset = -(scaledImageWidth - viewBounds.size.width) / 2
}
else if case .right = horizontalAlignment {
xOffset = (scaledImageWidth - viewBounds.size.width) / 2
}
contentsRect.origin.x = xOffset / scaledImageWidth
}
else {
let scaledImageHeight = viewBounds.size.width / imageFactor
var yOffset: CGFloat = 0.0
if case .top = verticalAlignment {
yOffset = -(scaledImageHeight - viewBounds.size.height) / 2
}
else if case .bottom = verticalAlignment {
yOffset = (scaledImageHeight - viewBounds.size.height) / 2
}
contentsRect.origin.y = yOffset / scaledImageHeight
}
layer.contentsRect = contentsRect
}
}
This will make the image fill the width and occupy only the height it needs to fit the image (width-wise speaking).
Swift 4.2:
let image = UIImage(named: "my_image")!
let ratio = image.size.width / image.size.height
cardImageView.widthAnchor
.constraint(equalTo: cardImageView.heightAnchor, multiplier: ratio).isActive = true
I had a similar problem.
The simplest way was to create my own subclass of UIImageView. I added 3 properties to the subclass, so it can now be used easily without knowing the internal implementation:
@property (nonatomic) LDImageVerticalAlignment imageVerticalAlignment;
@property (nonatomic) LDImageHorizontalAlignment imageHorizontalAlignment;
@property (nonatomic) LDImageContentMode imageContentMode;
You can check it here:
https://github.com/LucasssD/LDAlignmentImageView
Add an aspect-ratio constraint with your image's proportions.
Do not pin the UIImageView to the bottom.
If you want to change the UIImage dynamically, remember to update the aspect-ratio constraint, for example:
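A sketch of what that update might look like when the image changes (aspectConstraint and imageView are assumed properties; the old constraint is swapped for one matching the new image's proportions):
// Assumed stored property:
// var aspectConstraint: NSLayoutConstraint?

func updateAspectConstraint(for image: UIImage) {
    aspectConstraint?.isActive = false
    let ratio = image.size.width / image.size.height
    aspectConstraint = imageView.widthAnchor.constraint(equalTo: imageView.heightAnchor,
                                                        multiplier: ratio)
    aspectConstraint?.isActive = true
}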
I solved this natively in Interface Builder by setting a constraint on the height of the UIImageView, since the image would always be 'pushed' up when the image was larger than the screen size.
More specifically, I set the UIImageView to be the same height as the View it is in (via height constraint), then positioned the UIImageView with spacing constraints in IB. This results in the UIImageView having an 'Aspect Fit' which still respects the top spacing constraint I set in IB.
If you are able to subclass UIImageView, then you can just override the image var.
override var image: UIImage? {
didSet {
self.sizeToFit()
}
}
In Objective-C you can do the same thing by overriding the setter.
