Wrong offsets when displaying multiple VNRecognizedObjectObservation boundingBoxes using SwiftUI - ios

I am using Vision to detect objects and after getting [VNRecognizedObjectObservation] I transform the normalized rects before showing them:
let transform = CGAffineTransform(scaleX: 1, y: -1).translatedBy(x: 0, y: -CGFloat(height))
VNImageRectForNormalizedRect(normalizedRect, width, height) // Displayed with SwiftUI, that's why I'm applying transform
.applying(transform)
The width and height are from SwiftUI GeometryReader:
Image(...)
.resizable()
.scaledToFit()
.overlay {
GeometryReader { geometry in // ZStack and ForEach([VNRecognizedObjectObservation], id: \.uuid), then:
let calculatedRect = calculateRect(boundingBox, geometry)
Rectangle()
.frame(width: calculatedRect.width, height: calculatedRect.height)
.offset(x: calculatedRect.origin.x, y: calculatedRect.origin.y)
}
}
But the problem is that many boxes are positioned incorrectly (while some are accurate), even on square images.
This is not related to the model, because the same images (using the same MLModel) have pretty accurate bounding boxes when I try them in the Xcode Model Preview section.
Sample Image in my App:
Sample Image in Xcode Preview:
Update (Minimal Reproducible Example):
Having this code inside ContentView.swift as a macOS SwiftUI project while having YOLOv3Tiny.mlmodel in project bundle will produce the same results.
import SwiftUI
import Vision
import CoreML
class Detection: ObservableObject {
let imgURL = URL(string: "https://i.imgur.com/EqsxxTc.jpg")! // Xcode preview generates this: https://i.imgur.com/6IPNQ8b.png
@Published var objects: [VNRecognizedObjectObservation] = []
func getModel() -> VNCoreMLModel? {
if let modelURL = Bundle.main.url(forResource: "YOLOv3Tiny", withExtension: "mlmodelc") {
if let mlModel = try? MLModel(contentsOf: modelURL, configuration: MLModelConfiguration()) {
return try? VNCoreMLModel(for: mlModel)
}
}
return nil
}
func detect() async {
guard let model = getModel(), let tiff = NSImage(contentsOf: imgURL)?.tiffRepresentation else {
fatalError("Either YOLOv3Tiny.mlmodel is not in project bundle, or image failed to load.")
// YOLOv3Tiny: https://ml-assets.apple.com/coreml/models/Image/ObjectDetection/YOLOv3Tiny/YOLOv3Tiny.mlmodel
}
let request = VNCoreMLRequest(model: model) { (request, error) in
DispatchQueue.main.async {
self.objects = (request.results as? [VNRecognizedObjectObservation]) ?? []
}
}
try? VNImageRequestHandler(data: tiff).perform([request])
}
func deNormalize(_ rect: CGRect, _ geometry: GeometryProxy) -> CGRect {
let transform = CGAffineTransform(scaleX: 1, y: -1).translatedBy(x: 0, y: -CGFloat(geometry.size.height))
return VNImageRectForNormalizedRect(rect, Int(geometry.size.width), Int(geometry.size.height)).applying(transform)
}
}
struct ContentView: View {
@StateObject var detection = Detection()
var body: some View {
AsyncImage(url: detection.imgURL) { img in
img.resizable().scaledToFit().overlay {
GeometryReader { geometry in
ZStack {
ForEach(detection.objects, id: \.uuid) { object in
let rect = detection.deNormalize(object.boundingBox, geometry)
Rectangle()
.stroke(lineWidth: 2)
.foregroundColor(.red)
.frame(width: rect.width, height: rect.height)
.offset(x: rect.origin.x, y: rect.origin.y)
}
}
}
}
} placeholder: {
ProgressView()
}
.onAppear {
Task { await self.detection.detect() }
}
}
}
Edit: further testing revealed that VN returns correct positions, and my deNormalize() function also returns correct positions and sizes, so it has to be related to SwiftUI.

Issue 1
GeometryReader makes everything inside shrink to its smallest size.
Add .border(Color.orange) to the ZStack and you will see something like what I have below.
You can use .frame(maxWidth: .infinity, maxHeight: .infinity) to make the ZStack stretch to take all the available space.
Issue 2
position vs offset.
offset usually starts at the center then you offset by the specified amount.
position is more like origin.
Positions the center of this view at the specified coordinates in its parent’s coordinate space.
Issue 3
Adjusting for that center positioning vs top left (0, 0) that is used by origin.
Issue 4
The ZStack needs to be flipped on the X axis.
Below is the full code
import SwiftUI
import Vision
import CoreML
@MainActor
class Detection: ObservableObject {
//Moved file to assets
//let imgURL = URL(string: "https://i.imgur.com/EqsxxTc.jpg")! // Xcode preview generates this: https://i.imgur.com/6IPNQ8b.png
let imageName: String = "EqsxxTc"
@Published var objects: [VNRecognizedObjectObservation] = []
func getModel() throws -> VNCoreMLModel {
//Used model directly instead of loading from URL
let model = try YOLOv3Tiny(configuration: .init()).model
let mlModel = try VNCoreMLModel(for: model)
return mlModel
}
func detect() async throws {
let model = try getModel()
guard let tiff = NSImage(named: imageName)?.tiffRepresentation else {
// YOLOv3Tiny: https://ml-assets.apple.com/coreml/models/Image/ObjectDetection/YOLOv3Tiny/YOLOv3Tiny.mlmodel
//fatalError("Either YOLOv3Tiny.mlmodel is not in project bundle, or image failed to load.")
throw AppError.unableToLoadImage
}
//Completion handlers are not compatible with async/await; you have to convert to a continuation.
self.objects = try await withCheckedThrowingContinuation { (cont: CheckedContinuation<[VNRecognizedObjectObservation], Error>) in
let request = VNCoreMLRequest(model: model) { (request, error) in
if let error = error{
cont.resume(throwing: error)
}else{
cont.resume(returning: (request.results as? [VNRecognizedObjectObservation]) ?? [])
}
}
do{
try VNImageRequestHandler(data: tiff).perform([request])
}catch{
cont.resume(throwing: error)
}
}
}
func deNormalize(_ rect: CGRect, _ geometry: GeometryProxy) -> CGRect {
return VNImageRectForNormalizedRect(rect, Int(geometry.size.width), Int(geometry.size.height))
}
}
struct ContentView: View {
@StateObject var detection = Detection()
var body: some View {
Image(detection.imageName)
.resizable()
.scaledToFit()
.overlay {
GeometryReader { geometry in
ZStack {
ForEach(detection.objects, id: \.uuid) { object in
let rect = detection.deNormalize(object.boundingBox, geometry)
Rectangle()
.stroke(lineWidth: 2)
.foregroundColor(.red)
.frame(width: rect.width, height: rect.height)
//Changed to position
//Adjusting for center vs leading origin
.position(x: rect.origin.x + rect.width/2, y: rect.origin.y + rect.height/2)
}
}
//Geometry reader makes the view shrink to its smallest size
.frame(maxWidth: .infinity, maxHeight: .infinity)
//Flip upside down
.rotation3DEffect(.degrees(180), axis: (x: 1, y: 0, z: 0))
}.border(Color.orange)
}
.task {
do{
try await self.detection.detect()
}catch{
//Always throw errors to the View so you can tell the user somehow. You don't want crashes or to leave the user waiting for something that has failed.
print(error)
}
}
}
}
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView()
}
}
enum AppError: LocalizedError{
case cannotFindFile
case unableToLoadImage
}
I also changed some other things as you can notice, there are comments in the code.

Okay so after a long time of troubleshooting, I finally managed to make it work correctly (while still not understanding the reason for the problem)...
The problem was this part:
GeometryReader { geometry in
ZStack {
ForEach(detection.objects, id: \.uuid) { object in
let rect = detection.deNormalize(object.boundingBox, geometry)
Rectangle()
.stroke(lineWidth: 2)
.foregroundColor(.red)
.frame(width: rect.width, height: rect.height)
.offset(x: rect.origin.x, y: rect.origin.y)
}
}
}
I assumed because many Rectangle()s will overlap, I need a ZStack() to put them over each other, this turned out to be wrong, apparently when using .offset() they can overlap without any issue, so removing the ZStack() completely solved the problem:
GeometryReader { geometry in
ForEach(detection.objects, id: \.uuid) { object in
let rect = detection.deNormalize(object.boundingBox, geometry)
Rectangle()
.stroke(lineWidth: 2)
.foregroundColor(.red)
.frame(width: rect.width, height: rect.height)
.offset(x: rect.origin.x, y: rect.origin.y)
}
}
What I still don't understand, is why moving the ZStack() outside GeometryReader() also solves the problem and why some boxes were in the correct positions while some were not!
ZStack {
GeometryReader { geometry in
ForEach(detection.objects, id: \.uuid) { object in
let rect = detection.deNormalize(object.boundingBox, geometry)
Rectangle()
.stroke(lineWidth: 2)
.foregroundColor(.red)
.frame(width: rect.width, height: rect.height)
.offset(x: rect.origin.x, y: rect.origin.y)
}
}
}

Related

How to align the middle of a View to the bottom of other view in SwiftUI?

So basically I need to come up with a layout that aligns the middle of a View to the bottom of another View in SwiftUI.
To make it more clear, all I need is something like this:
I guess the equivalent in a UIKit world would be:
redView.bottomAnchor.constraint(equalTo: whiteView.centerYAnchor)
I've tried setting both in a ZStack and offsetting the white View, but it won't work on all screen sizes for obvious reasons.
Any tips?
You can use .alignmentGuide (which is a tricky beast, I recommend this explanation)
Here is your solution, its independent of the child view sizes:
struct ContentView: View {
var body: some View {
ZStack(alignment: .bottom) { // subviews generally aligned at bottom
redView
whiteView
// center of this subview aligned to .bottom
.alignmentGuide(VerticalAlignment.bottom,
computeValue: { d in d[VerticalAlignment.center] })
}
.padding()
}
var redView: some View {
VStack {
Text("Red View")
}
.frame(maxWidth: .infinity)
.frame(height: 200)
.background(Color.red)
.cornerRadius(20)
}
var whiteView: some View {
VStack {
Text("White View")
}
.frame(maxWidth: .infinity)
.frame(width: 250, height: 100)
.background(Color.white)
.cornerRadius(20)
.overlay(RoundedRectangle(cornerRadius: 20).stroke())
}
}
You may try this:
ZStack {
Rectangle()
.frame(width: 300, height: 400)
.overlay(
GeometryReader { proxy in
let offsetY = proxy.frame(in: .named("back")).midY
Rectangle()
.fill(Color.red)
.offset(y: offsetY)
}
.frame(width: 150, height: 140)
, alignment: .center)
}
.coordinateSpace(name: "back")
Basically, the idea is to use coordinateSpace to get the frame of the bottom Rectangle, and to use GeometryReader to get the offset needed by comparing the frame of the top rectangle with the bottom one. Since we are using overlay and it is already aligned to the center horizontally, we just need to offset y to get the effect you want.
Based on OPs request in comment, this is a solution making use of a custom Layout.
The HalfOverlayLayout takes two subview and places the second half height over the first. The size of the first subview is flexible. As this is my first Layout I'm not sure if I covered all possible size variants, but it should be a start.
struct ContentView: View {
var body: some View {
HalfOverlayLayout {
redView
whiteView
}
.padding()
}
var redView: some View {
VStack {
ForEach(0..<20) { _ in
Text("Red View")
}
}
.padding()
.frame(maxWidth: .infinity, maxHeight: .infinity)
.background(Color.red)
.cornerRadius(20)
}
var whiteView: some View {
VStack {
Text("White View")
}
.frame(width: 200, height: 150)
.background(Color.white)
.cornerRadius(20)
.overlay(RoundedRectangle(cornerRadius: 20).stroke())
}
}
struct HalfOverlayLayout: Layout {
func sizeThatFits(proposal: ProposedViewSize, subviews: Subviews, cache: inout ()) -> CGSize {
let heightContent = subviews.first?.sizeThatFits(.unspecified).height ?? 0
let heightFooter = subviews.last?.sizeThatFits(.unspecified).height ?? 0
let totalHeight = heightContent + heightFooter / 2
let maxsizes = subviews.map { $0.sizeThatFits(.infinity) }
var totalWidth = maxsizes.max {$0.width < $1.width}?.width ?? 0
if let proposedWidth = proposal.width {
if totalWidth > proposedWidth { totalWidth = proposedWidth }
}
return CGSize(width: totalWidth, height: totalHeight)
}
func placeSubviews(in bounds: CGRect, proposal: ProposedViewSize, subviews: Subviews, cache: inout ()) {
let heightFooter = subviews.last?.sizeThatFits(.unspecified).height ?? 0
let maxHeightContent = bounds.height - heightFooter / 2
var pt = CGPoint(x: bounds.midX, y: bounds.minY)
if let first = subviews.first {
var totalWidth = first.sizeThatFits(.infinity).width
if let proposedWidth = proposal.width {
if totalWidth > proposedWidth { totalWidth = proposedWidth }
}
first.place(at: pt, anchor: .top, proposal: .init(width: totalWidth, height: maxHeightContent))
}
pt = CGPoint(x: bounds.midX, y: bounds.maxY)
if let last = subviews.last {
last.place(at: pt, anchor: .bottom, proposal: .unspecified)
}
}
}

How do I modify the background color and shape of drag&drop preview SwiftUI?

I'm trying to implement a simple list with draggable cells (iOS 15). For now I have two base solutions: List and ScrollView. Neither of these methods leads me to an appropriate result.
First of all I'll provide some code:
List item view:
struct ListItemView: View {
let index: Int
var body: some View {
HStack {
Text("Item \(index)")
.foregroundColor(.white)
Spacer()
}
.padding()
.background(Color.gray)
.clipShape(RoundedRectangle(cornerRadius: 16.0))
}
}
First solution (using List):
struct ContentView: View {
var body: some View {
List(0 ..< 10) { i in
ListItemView(index: i)
.onDrag {
NSItemProvider(object: String(describing: "item_\(i)") as NSString)
}
.padding(.bottom, 8.0)
.listRowBackground(Color.clear)
.listRowSeparator(.hidden)
.listRowInsets(EdgeInsets(top: 0, leading: 8, bottom: 0, trailing: 8))
}
.background(Color.black)
.listStyle(PlainListStyle())
}
}
The result is fine, but when I start dragging action, I see some white rect around my cell, which is not what I want:
Second solution (using ScrollView)
struct ContentView: View {
var body: some View {
ScrollView {
ForEach(0 ..< 10) { i in
ListItemView(index: i)
.onDrag {
NSItemProvider(object: String(describing: "item_\(i)") as NSString)
}
}
}
.background(Color.black)
}
}
The result is a bit different: we don't see any coloured rects (I suppose this is because we are not using List and there is a different mechanism behind the drag & drop feature). Also there is a scaled preview and a shape without rounded corners.
So, the desirable behaviour is:
a) the size of preview is the same as original size of list item
b) there is rounded-corners shape without white frame
How can I achieve it?
Theoretically your NSItemProvider can offer a previewImageHandler which is a block that draws the image you want the system to use as the drag preview. Unfortunately that doesn't seem to work. The documentation of the onDrag modifier says that it uses a picture of the view as the preview image and it seemingly ignores the previewImageHandler. If someone finds out otherwise, leave a comment and I'll amend the answer. Here's what I tried:
import SwiftUI
struct ListItem : Identifiable {
let id: Int
let title: String
}
let itemsInList = (0..<10).map { ListItem(id: $0, title: "Item \($0)") }
@ViewBuilder func ListItemView(title : String) -> some View {
ZStack(alignment: .leading) {
RoundedRectangle(cornerRadius: 18, style: .circular)
.foregroundColor(Color(red: 142.0/255.0, green: 142.0/255.0, blue: 142.0/255.0, opacity: 1.0))
Text(title)
.foregroundColor(.white)
.padding()
}
}
struct ContentView: View {
var body: some View {
List(itemsInList) {
let itemTitle = $0.title
ListItemView(title: itemTitle)
.onDrag {
let itemProvider = NSItemProvider()
itemProvider.registerObject("Item \(itemTitle)" as NSString, visibility: .all)
itemProvider.previewImageHandler = {
(completionHandler, expectedClass, options) -> Void in
var resultImage : CGImage?
debugPrint(expectedClass ?? "Unknown")
if let cgContext = CGContext(data: nil,
width: 72,
height: 72,
bitsPerComponent: 8,
bytesPerRow: 16 * (((72 * 4) + 15) / 16),
space: CGColorSpace(name: CGColorSpace.sRGB)!,
bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue) {
let frame = CGRect(x: 0, y: 0, width: 72, height: 72)
cgContext.clear(frame)
cgContext.setFillColor(UIColor.red.cgColor)
cgContext.fillEllipse(in: frame)
resultImage = cgContext.makeImage()
}
if let completionHandler = completionHandler {
if let resultImage = resultImage {
completionHandler(UIImage(cgImage: resultImage), nil)
} else {
completionHandler(nil, NSError(domain: "ItemCompletion", code: -1, userInfo: nil))
}
}
}
return itemProvider
}
}
}
}
struct previews : PreviewProvider {
static var previews: some View {
ContentView()
}
}

Create sliding effect by offsetting button position with image

I have created a slider view which consists of 2 images, a chevron pointer and a slider line. My ultimate goal is to offset the button in the opposite direction to the chevron image, so that when the user clicks the empty space the button will slide to its location. Below is a gif of me attempting to achieve this. My approach is quite manual and I suspect there is a more sophisticated way of achieving my goal. I have it working when I click the button once; I suspect that on the other side I have my numbers wrong, so it doesn't work when the image is on the left side of the screen.
import SwiftUI
extension Animation {
static func smooth() -> Animation {
Animation.spring()
.speed(0.7)
}
}
struct SliderView: View {
//this works as a toggle you can implement other booleans with this
@State var partyType: Bool = false
@State private var isOn = false
@State var width: CGFloat = 0
@State var height: CGFloat = 0
let screen = UIScreen.main.bounds
var body: some View {
ZStack {
Toggle("",isOn: $isOn).toggleStyle(SliderToggle())
}
}
func imageWidthX(name: String) -> CGFloat {
let image = UIImage(named: name)!
let width = image.size.width
return width * 2.5
}
func hostParty() -> Bool {
if partyType {
return true
} else {
return false
}
}
}
struct SlideEffect: GeometryEffect {
var offset: CGSize
var animatableData: CGSize.AnimatableData {
get { CGSize.AnimatableData(offset.width, offset.height) }
set { offset = CGSize(width: newValue.first, height: newValue.second) }
}
public func effectValue(size: CGSize) -> ProjectionTransform {
return ProjectionTransform(
CGAffineTransform(
translationX: offset.width,
y: offset.height
)
)
}
}
struct SliderView_Previews: PreviewProvider {
static var previews: some View {
SliderView()
}
}
struct SliderToggle: ToggleStyle {
func makeBody(configuration: Configuration) -> some View {
GeometryReader{ geo in
VStack (alignment: .leading,spacing: 0){
Button(action: {
withAnimation() {
configuration.isOn.toggle()
}
}) {
Image("chevronPointer")
.clipped()
.animation(.smooth())
.padding(.horizontal, 30)
.offset(x: geo.size.width - 100, y: 0)
}
.modifier(
SlideEffect(
offset: CGSize(
width: configuration.isOn ? -geo.size.width + imageWidthX(name: "chevronPointer") : 0,
height: 0.0
)
)
)
Image("sliderLine")
.resizable()
.scaledToFit()
.frame(width: geo.size.width, alignment: .center)
}
}
.frame(height: 40)
}
}
func imageWidthX(name: String) -> CGFloat {
let image = UIImage(named: name)!
let width = image.size.width
return width * 2.5
}
I worded the question pretty poorly, but ultimately the goal I wanted to achieve was to have 2 buttons on a stack that controlled the view under it. When the opposite side is clicked, the slider icon will travel to that side and vice versa. The offset made things a lot more complicated than necessary, so I removed this code and basically created invisible buttons. If there is a more elegant way to achieve this, I would be truly grateful.
HStack {
ZStack {
Button (" ") {
withAnimation(){
configuration.isOn.toggle()
}}
.padding(.trailing).frame(width: 70, height: 25)
Image("chevronPointer")
.clipped()
.animation(.smooth())
.padding(.horizontal, 30)
//.offset(x: geo.size.width - 100, y: 0)
.modifier(SlideEffect(offset: CGSize(width: configuration.isOn ? geo.size.width - imageWidthX(name: "chevronPointer"): 0, height: 0.0)))
}
Spacer()
Button(" "){
withAnimation(){
configuration.isOn.toggle()
}
}.padding(.trailing).frame(width: 70, height: 25)
}

Non-rectangular image cropping SwiftUI?

I would like to allow a user to crop images by drawing a closed shape on the original image. Sort of like Snapchat stickers. I was looking at this post from 2012 and was curious if there is an updated way to do this with SwiftUI.
To clarify, I would like to take user input (drawing) over a displayed image and crop the actual image (not the view) with a mask that is that shape.
Actually, your question needs to be more specific, but you can resize your image with the following code in SwiftUI.
Image("Your Image name here!")
.resizable()
.frame(width: 300, height: 300)
if you want to crop at any position of the image you may try this one
import SwiftUI
struct ContentView: View {
var body: some View {
CropImage(imageName: "Your Image name here!")
}
}
// struct Shape
struct CropFrame: Shape {
let isActive: Bool
func path(in rect: CGRect) -> Path {
guard isActive else { return Path(rect) } // full rect for non active
let size = CGSize(width: UIScreen.main.bounds.size.width*0.7, height: UIScreen.main.bounds.size.height/5)
let origin = CGPoint(x: rect.midX - size.width/2, y: rect.midY - size.height/2)
return Path(CGRect(origin: origin, size: size).integral)
}
}
// struct image crop
struct CropImage: View {
let imageName: String
@State private var currentPosition: CGSize = .zero
@State private var newPosition: CGSize = .zero
@State private var clipped = false
var body: some View {
VStack {
ZStack {
Image(imageName)
.resizable()
.scaledToFit()
.offset(x: currentPosition.width, y: currentPosition.height)
Rectangle()
.fill(Color.black.opacity(0.3))
.frame(width: UIScreen.main.bounds.size.width*0.7 , height: UIScreen.main.bounds.size.height/5)
.overlay(Rectangle().stroke(Color.white, lineWidth: 3))
}
.clipShape(
CropFrame(isActive: clipped)
)
.gesture(DragGesture()
.onChanged { value in
currentPosition = CGSize(width: value.translation.width + newPosition.width, height: value.translation.height + newPosition.height)
}
.onEnded { value in
currentPosition = CGSize(width: value.translation.width + newPosition.width, height: value.translation.height + newPosition.height)
newPosition = currentPosition
})
Button (action : { clipped.toggle() }) {
Text("Crop Image")
.padding(.all, 10)
.background(Color.blue)
.foregroundColor(.white)
.shadow(color: .gray, radius: 1)
.padding(.top, 50)
}
}
}
}
This is the full code of a view with image cropping.
else you may use a library from GitHub, see the GitHub demo here

Image carousel with transition effect using swiftUI

So, I've been using swiftUI almost from the time of it's release and playing around with it. However, I'm trying to add a new feature into my app which doesn't seem to work as desired.
I want to have an image carousel (automatic, meaning the images change after a regular interval) with a transition effect. After doing a bit of research, I could possibly find a method to do so. But it's not coming together.
Here's my code: SwipeImages.swift
import SwiftUI
struct SwipeImages:View{
@State private var difference: CGFloat = 0
@State private var index = 0
let spacing:CGFloat = 10
var timer: Timer{
Timer.scheduledTimer(withTimeInterval: 2, repeats: true) { (timer) in
if self.index < images.count - 1{
self.index += 1
}
else{
self.index = 0
}
}
}
var body:some View{
ContentView(imageData: images[self.index])
.onAppear {
let _ = self.timer
}
}
}
public struct ImageData:Identifiable{
public let id:Int
let image:String
let title:String
let country:String
}
let images = [ImageData(id:0,image:"1a.jpg",title: "String1", country: "country1"),
ImageData(id:1,image:"2a.jpg",title: "String2", country: "country2"),
ImageData(id:2,image:"3a.jpg",title: "String3", country: "country3")]
ContentView.swift
import SwiftUI
struct ContentView:View{
let imageData:ImageData
var transitionStyle:Int = 1
var transition:AnyTransition{
switch transitionStyle{
case 0:
return .opacity
case 1:
return .circular
case 2:
return .stripes(stripes:50,horizontal:true)
default:
return .opacity
}
}
var body:some View{
ZStack{
Image(uiImage: UIImage(named: "\(imageData.image)")!)
.resizable()
.transition(self.transition)
.overlay(
Rectangle()
.fill(LinearGradient(gradient: Gradient(colors: [.clear,.black]), startPoint: .center, endPoint: .bottom))
.clipped()
)
.cornerRadius(2.0)
VStack(alignment: .leading) {
Spacer()
Text("\(imageData.title)")
.font(.title)
.fontWeight(.semibold)
.foregroundColor(.white)
Text(imageData.country)
.foregroundColor(.white)
}
.padding()
}
.shadow(radius:12.0)
.cornerRadius(12.0)
}
}
extension Image{
func imageStyle(height:CGFloat) -> some View{
let shape = RoundedRectangle(cornerRadius: 15.0)
return self.resizable()
.frame(height:height)
.overlay(
Rectangle()
.fill(LinearGradient(gradient: Gradient(colors: [.clear,.black]), startPoint: .center, endPoint: .bottom))
.clipped()
)
.cornerRadius(2.0)
.clipShape(shape)
}
}
extension AnyTransition{
static var circular: AnyTransition{
get{
AnyTransition.modifier(active: ShapeClipModifier(shape: CircleClipShape(pct:1)), identity: ShapeClipModifier(shape: CircleClipShape(pct:0)))
}
}
static func stripes(stripes s:Int,horizontal isHorizontal:Bool) -> AnyTransition{
return AnyTransition.asymmetric(insertion: AnyTransition.modifier(active: ShapeClipModifier(shape:StripeShape(insertion:true,pct:1,stripes:s,horizontal:isHorizontal)), identity:
ShapeClipModifier(shape:StripeShape(insertion:true,pct:0,stripes:s,horizontal:isHorizontal))
), removal:AnyTransition.modifier(active: ShapeClipModifier(shape:StripeShape(insertion:false,pct:1,stripes:s,horizontal:isHorizontal))
, identity:
ShapeClipModifier(shape:StripeShape(insertion:false,pct:0,stripes:s,horizontal:isHorizontal)))
)
}
}
struct ShapeClipModifier<S: Shape>: ViewModifier{
let shape: S
func body(content:Content) -> some View {
content.clipShape(shape)
}
}
struct StripeShape: Shape{
let insertion: Bool
var pct: CGFloat
let stripes: Int
let horizontal: Bool
var animatableData: CGFloat{
get{pct}
set{pct = newValue}
}
func path(in rect:CGRect) -> Path{
var path = Path()
let stripeHeight = rect.height/CGFloat(stripes)
for i in 0..<stripes{
let iteratorValue = CGFloat(i)
if insertion{
path.addRect(CGRect(x: 0, y: iteratorValue * stripeHeight, width: rect.width, height: stripeHeight * (1 - pct)))
}
else{
path.addRect(CGRect(x: 0, y: iteratorValue * stripeHeight + (stripeHeight * pct), width: rect.width, height: stripeHeight * (1 - pct)))
}
}
return path
}
}
struct CircleClipShape: Shape{
var pct:CGFloat
var animatableData: CGFloat{
get{pct}
set{pct = newValue}
}
func path(in rect: CGRect) -> Path {
var path = Path()
var bigRect = rect
bigRect.size.width = bigRect.size.width * 2 * (1-pct)
bigRect.size.height = bigRect.size.height * 2 * (1-pct)
bigRect = bigRect.offsetBy(dx: -rect.width/2.0, dy: -rect.height/2.0)
path = Circle().path(in: bigRect)
return path
}
}
MainView.swift
import SwiftUI
struct MainView:View{
var body:some View{
VStack{
SwipeImages()
.padding()
}
}
}
When the app is launched, the images change after the specified scheduled interval (i.e. 2s), but the required transitions such as stripes or circular don't seem to come into action. (Also, I tried applying regular built-in transitions such as slide and opacity; even they aren't working.)
Could you please help me identifying what's wrong, or any other alternatives to achieve the same?
Thanks.
I found something similar to what you are trying to achieve here. The author implemented a custom SwiftUI view, with an option to animate automatically or by swiping. It's a very basic implementation, but you can still get some ideas from it.

Resources