I can connect a physical instrument like Shaker, or a CallbackInstrument, to the Sequencer fine, but not a MIDIInstrument like SynthKick:
var sequencer = Sequencer()
var synthKick = SynthKick()
synthKick.enableMIDI()
var track = sequencer.addTrack(for: synthKick)
track.sequence.add(noteNumber: MIDINoteNumber(60), position: 0, duration: 1)
mixer.addInput(synthKick)
And in SynthKick:
public override func start(noteNumber: MIDINoteNumber,
velocity: MIDIVelocity,
channel: MIDIChannel,
timeStamp: MIDITimeStamp? = nil) {
play(noteNumber: noteNumber)
}
The code above outputs no signal.
What am I doing wrong?
I know it's been a long time since you posted this question, but I was looking at this stuff myself, so I decided to answer.
I used the CallbackInstrumentConductor from
the following link:
https://github.com/AudioKit/Cookbook/blob/main/Cookbook/CookbookCommon/Sources/CookbookCommon/Recipes/CallbackInstrument.swift
to figure out how to extend MIDIInstrument, and I used a synth from DunneAudioKit as the output.
DunneAudioKit:
https://github.com/AudioKit/DunneAudioKit
The following code worked for me; see if it helps.
1: imports
import CoreMIDI
import AudioKit
import DunneAudioKit
import SwiftUI
2: extended MIDIInstrument
class Extended_MidiInstrument: MIDIInstrument {
let synth = Synth()
init() {
super.init()
self.avAudioNode = synth.avAudioNode
}
override func start(noteNumber: MIDINoteNumber, velocity: MIDIVelocity, channel: MIDIChannel, timeStamp: MIDITimeStamp? = nil) {
self.synth.play(noteNumber: noteNumber, velocity: velocity)
}
override func stop(noteNumber: MIDINoteNumber, channel: MIDIChannel, timeStamp: MIDITimeStamp? = nil) {
self.synth.stop(noteNumber: noteNumber)
}
}
3: the Conductor - heavily influenced by the callback conductor.
class Extended_MidiInstrumentConductor: ObservableObject {
let xmidiInst = Extended_MidiInstrument()
let engine = AudioEngine()
var sequencer = AppleSequencer()
var tempo = 120.0
var division = 1
init() {
let seq_Track = sequencer.newTrack()
for i in 0 ..< division {
seq_Track?.add(noteNumber: 40,
velocity: 100,
position: Duration(beats: Double(i) / Double(division)),
duration: Duration(beats: Double(0.1 / Double(division))))
seq_Track?.add(noteNumber: 60,
velocity: 100,
position: Duration(beats: (Double(i) + 0.5) / Double(division)),
duration: Duration(beats: Double(0.1 / Double(division))))
}
seq_Track?.setMIDIOutput(xmidiInst.midiIn)
seq_Track?.setLoopInfo(Duration(beats: 1.0), loopCount: 0)
sequencer.setTempo(tempo)
engine.output = xmidiInst
}
func start_Engine() {
do {
try engine.start()
} catch let err {
Log(err)
}
}
func stop_Engine() {
engine.stop()
}
}
4: (finally) the view for the conductor in (3)
struct Extended_MidiInstrumentView: View {
@StateObject private var conductor = Extended_MidiInstrumentConductor()
var body: some View {
VStack(spacing: 30) {
Text("Play").onTapGesture {
self.conductor.sequencer.play()
print("play called")
}.foregroundColor(.white)
Text("Pause").onTapGesture {
self.conductor.sequencer.stop()
print("pause called")
}.foregroundColor(.white)
Text("Rewind").onTapGesture {
self.conductor.sequencer.rewind()
print("rewind called")
}.foregroundColor(.white)
}
.onAppear {
self.conductor.start_Engine()
}
.onDisappear {
self.conductor.stop_Engine()
}
}
}
Obviously you don't actually need the print statements, but anyway, I hope that helps.
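For completeness, here is a minimal sketch of how you might host that view in an app entry point (this part is my addition, not from the Cookbook; the app name is made up):
import SwiftUI
@main
struct MidiDemoApp: App {
    var body: some Scene {
        WindowGroup {
            // The conductor is created inside the view via @StateObject,
            // so it survives view reloads. Black background for the white labels.
            Extended_MidiInstrumentView()
                .background(Color.black)
        }
    }
}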
I have a subclass of RealityKit's ARView that has the following function for making a Raycast:
func makeRaycastQuery(alignmentType: ARRaycastQuery.TargetAlignment) -> simd_float4x4? {
let results = self.raycast(from: self.center,
// Better for pinning to planes
allowing: .estimatedPlane,
alignment: alignmentType)
// We don't care about changing scale on raycast so keep it the same
guard var result = results.first?.worldTransform else { return nil }
result.scale = SCNVector3(1, 1, 1)
return result
}
However, my results array is always empty. Is there some sort of other configuration I need to do when setting up an ARView to enable raycasting?
Try this code (I've written it for iPad's Playgrounds):
import RealityKit
import SwiftUI
import ARKit
import PlaygroundSupport
struct ContentView : View {
#State private var arView = ARView(frame: .zero)
var body: some View {
return ARContainer(arView: $arView)
.gesture(
TapGesture()
.onEnded { _ in
raycasting(arView: arView)
}
)
}
func raycasting(arView: ARView) {
guard let query = arView.makeRaycastQuery(from: arView.center,
allowing: .estimatedPlane,
alignment: .any)
else { fatalError() }
guard let result = arView.session.raycast(query).first
else { fatalError() }
let entity = ModelEntity(mesh: .generateSphere(radius: 0.1))
let anchor = AnchorEntity(raycastResult: result)
anchor.addChild(entity)
arView.scene.anchors.append(anchor)
}
}
struct ARContainer : UIViewRepresentable {
#Binding var arView: ARView
func makeUIView(context: Context) -> ARView {
arView.cameraMode = .ar
return arView
}
func updateUIView(_ view: ARView, context: Context) { }
}
PlaygroundPage.current.needsIndefiniteExecution = true
PlaygroundPage.current.setLiveView(ContentView())
P.S. This version also works in a UIKit app.
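One more thing worth checking if your results array still comes back empty: make sure the session is running a world-tracking configuration with plane detection enabled. A sketch, assuming a standard ARView setup:
// Hypothetical session setup; explicit plane detection makes
// plane-based raycasts succeed much more reliably.
let config = ARWorldTrackingConfiguration()
config.planeDetection = [.horizontal, .vertical]
arView.session.run(config)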
I have two views, the Main View and a Break View. I have a timer running in the Main View which counts down to zero. When the timer reaches zero, I want to switch the screen to the Break View. I am using MVVM to keep track of the timers, and .onReceive to make it look like the timer is running in the background.
I tried using a Boolean to check whether the timer has reached zero and changing the view based on that, but it's not working; it gives an error saying the result of the view is not used anywhere. I have a NavigationView in the Content View if that's of any help.
Thanks in advance.
A snippet of the code:
Main View:
struct MainView: View {
var body: some View {
VStack(alignment: .center, spacing: 50, content: {
Button(action: {
if !fbManager.isTimerStarted {
fbManager.start()
fbManager.isTimerStarted = true
}
else {
fbManager.pause()
fbManager.isTimerStarted = false
}
}, label: {
Image(systemName: fbManager.isTimerStarted == true ? "pause.fill" : "play.fill")
.resizable()
.scaledToFit()
.frame(width: 50, height: 50)
.foregroundColor(Color(red: 1.00, green: 1.00, blue: 1.00))
})
.onReceive(NotificationCenter.default.publisher(
for: UIScene.didEnterBackgroundNotification)) { _ in
if fbManager.isTimerStarted {
movingToBackground()
}
}
.onReceive(NotificationCenter.default.publisher(
for: UIScene.willEnterForegroundNotification)) { _ in
if fbManager.isTimerStarted {
movingToForeground()
}
}
})
}
func movingToBackground() {
print("Moving to the background")
notificationDate = Date()
fbManager.pause()
}
func movingToForeground() {
print("Moving to the foreground")
let deltaTime: Int = Int(Date().timeIntervalSince(notificationDate))
fbManager.secondsElapsed -= deltaTime
fbManager.start()
}
}
View Model:
class FocusBreakManager: ObservableObject {
var timer: Timer = Timer()
func start() {
timer = Timer.scheduledTimer(withTimeInterval: 1, repeats: true) { [self] _ in
self.secondsElapsed -= 1
self.focusfill += 0.01667
focusTime = String(secondsElapsed)
focusTime = formatCounter()
if secondsElapsed <= 0 {
stop()
}
}
}
func formatCounter() -> String {
let minutes = Int(secondsElapsed) / 60 % 60
let seconds = Int(secondsElapsed) % 60
return String(format : "%02i : %02i", minutes, seconds)
}
}
Hey, to build on your solution, here is an example of how that could work. You would need to use the @ObservedObject property wrapper in order for your view to monitor updates from the model.
struct ContentView: View {
@ObservedObject private var focusBreakManager = FocusBreakManager()
var body: some View {
VStack {
Text("\(focusBreakManager.elapsedSeconds)")
Text(focusBreakManager.timerRunningMessage)
Button("Start timer", action: focusBreakManager.start)
}
.padding()
}
}
class FocusBreakManager: ObservableObject {
var timer: Timer = Timer()
@Published var elapsedSeconds = 0
var timerRunningMessage: String {
timerRunning
? "Timer is running"
: "Timer paused"
}
private var timerRunning: Bool {
timer.isValid
}
func start() {
timer = Timer.scheduledTimer(withTimeInterval: 1, repeats: true) { [weak self] _ in
guard let self else { return }
self.elapsedSeconds += 1
if self.elapsedSeconds > 5 {
self.timer.invalidate()
}
}
}
}
You can also take a look at the autoconnect API; here's a great tutorial:
https://www.hackingwithswift.com/books/ios-swiftui/triggering-events-repeatedly-using-a-timer
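For reference, here is a minimal sketch of the autoconnect approach from that tutorial (the view and its labels are my own):
import SwiftUI
struct AutoTimerView: View {
    @State private var seconds = 0
    // Publishes a tick every second on the main run loop;
    // autoconnect() starts the timer as soon as the view subscribes.
    let timer = Timer.publish(every: 1, on: .main, in: .common).autoconnect()
    var body: some View {
        Text("\(seconds) s")
            .onReceive(timer) { _ in
                seconds += 1
            }
    }
}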
Consider the following example
import AVFoundation
import HaishinKit
import VideoToolbox
import SwiftUI
struct TestStreamView: View {
@State var rtmpStream: RTMPStream?
var rtmpConnection = RTMPConnection()
var body: some View {
ZStack(alignment: .topLeading) {
if let stream = rtmpStream {
BroadcastView(stream: stream)
.cornerRadius(12)
}
}
.edgesIgnoringSafeArea(.top)
.onAppear {
rtmpStream = RTMPStream(connection: rtmpConnection)
guard let stream = rtmpStream else { return }
stream.orientation = .portrait
stream.captureSettings = [
.sessionPreset: AVCaptureSession.Preset.hd1920x1080,
.continuousAutofocus: true,
.continuousExposure: true,
.fps: 30
]
stream.videoSettings = [
.scalingMode: ScalingMode.cropSourceToCleanAperture,
.width: 1080,
.height: 1920,
.bitrate: 5000000,
.profileLevel: kVTProfileLevel_H264_Main_AutoLevel,
.maxKeyFrameIntervalDuration: 2
]
stream.audioSettings = [
.bitrate: 128000 // Always use 128kbps
]
stream.attachAudio(AVCaptureDevice.default(for: .audio))
stream.attachCamera(DeviceUtil.device(withPosition: .front))
}
}
}
Here I am using a library called HaishinKit to set up my live stream feature.
I created a UIViewRepresentable for the MTHKView found in HaishinKit, as follows:
public struct BroadcastView: UIViewRepresentable {
let stream: RTMPStream
@State private var broadcastView: MTHKView?
public class Coordinator: NSObject, RTMPStreamDelegate {
var parent: BroadcastView
init(_ parent: BroadcastView) {
self.parent = parent
}
// MARK: - RTMPStreamDelegate callbacks
public func rtmpStreamDidClear(_ stream: RTMPStream) {}
public func rtmpStream(_ stream: RTMPStream, didStatics connection: RTMPConnection) {}
public func rtmpStream(_ stream: RTMPStream, didPublishInsufficientBW connection: RTMPConnection) {}
public func rtmpStream(_ stream: RTMPStream, didPublishSufficientBW connection: RTMPConnection) {}
@objc func focusGesture(sender: UITapGestureRecognizer) {
guard let broadcastView = parent.broadcastView else { return }
if sender.state == UIGestureRecognizer.State.ended {
let point = sender.location(in: broadcastView)
let pointOfInterest = CGPoint(x: point.x / broadcastView.bounds.size.width, y: point.y / broadcastView.bounds.size.height)
parent.stream.setPointOfInterest(pointOfInterest, exposure: pointOfInterest)
}
}
}
public func makeCoordinator() -> Coordinator {
Coordinator(self)
}
public func makeUIView(context: Context) -> MTHKView {
let view = MTHKView(frame: .zero)
view.translatesAutoresizingMaskIntoConstraints = false
view.attachStream(stream)
stream.delegate = context.coordinator
DispatchQueue.main.async {
self.broadcastView = view
}
let focusGesture = UITapGestureRecognizer(target: context.coordinator, action: #selector(Coordinator.focusGesture(sender:)))
view.addGestureRecognizer(focusGesture)
return view
}
public func updateUIView(_ uiView: MTHKView, context: Context) {
uiView.attachStream(stream)
}
}
I'm facing two major issues with this setup:
The MTHKView video capture doesn't extend to fullscreen (there is black spacing around the video).
I'm required to set the aspect ratio (the width/height keys) in the videoSettings property, which doesn't seem ideal when running on a different device.
Any input or ideas would be greatly appreciated! Thank you!
Adding view.videoGravity = .resizeAspectFill fixes the problem.
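That is, inside makeUIView of the BroadcastView above (a trimmed sketch; the coordinator and gesture wiring from the original are omitted):
public func makeUIView(context: Context) -> MTHKView {
    let view = MTHKView(frame: .zero)
    // Scale the video to fill the view, cropping the overflow
    // instead of leaving black bars.
    view.videoGravity = .resizeAspectFill
    view.attachStream(stream)
    return view
}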
A bump effect for a button is a simple, standard animation, but it's not so simple in SwiftUI.
I'm trying to change the scale in a tapGesture modifier, but it doesn't have any effect. I don't know how to make a chain of animations, probably because SwiftUI doesn't have one. So my naive approach was:
@State private var scaleValue = CGFloat(1)
...
Button(action: {
withAnimation {
self.scaleValue = 1.5
}
withAnimation {
self.scaleValue = 1.0
}
}) {
Image("button1")
.scaleEffect(self.scaleValue)
}
Obviously it doesn't work, and the button's image gets the last scale value immediately.
My second thought was to change the scale to 0.8 on the hold event, then after the release event scale it to 1.2, and after a few milliseconds change it back to 1.0. I guess this algorithm should make a nicer and more natural bump effect. But I couldn't find a suitable gesture struct in SwiftUI to handle a hold-n-release event.
P.S. For ease of understanding, I will describe the steps of the hold-n-release algorithm:
The scale value is 1.0
The user touches the button
The button scale becomes 0.8
The user releases the button
The button scale becomes 1.2
Delay 0.1 sec
The button scale goes back to the default 1.0
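For what it's worth, here is a naive sketch of those steps using a zero-distance DragGesture as a press detector (the durations are my guesses):
Image("button1")
    .scaleEffect(scaleValue)
    .gesture(DragGesture(minimumDistance: 0)
        .onChanged { _ in
            // Steps 2-3: touch down shrinks the button.
            withAnimation(.linear(duration: 0.1)) { scaleValue = 0.8 }
        }
        .onEnded { _ in
            // Steps 4-5: release overshoots to 1.2...
            withAnimation(.linear(duration: 0.1)) { scaleValue = 1.2 }
            // ...Steps 6-7: then settle back to 1.0 after a short delay.
            DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
                withAnimation(.linear(duration: 0.1)) { scaleValue = 1.0 }
            }
        })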
UPD: I found a simple solution using the animation delay modifier. But I'm not sure it's right and clean. Also, it doesn't cover the hold-n-release issue:
@State private var scaleValue = CGFloat(1)
...
Button(action: {
withAnimation {
self.scaleValue = 1.5
}
//
// Using delay for second animation block
//
withAnimation(Animation.linear.delay(0.2)) {
self.scaleValue = 1.0
}
}) {
Image("button1")
.scaleEffect(self.scaleValue)
}
UPD 2:
I noticed that in the solution above it doesn't matter what value I pass as the argument to the delay modifier: 0.2 or 1000 will have the same effect. Perhaps it's a bug 🐞
So I've used a Timer instance instead of the delay animation modifier. And now it's working as expected:
...
Button(action: {
withAnimation {
self.scaleValue = 1.5
}
//
// Replace it
//
// withAnimation(Animation.linear.delay(0.2)) {
// self.scaleValue = 1.0
// }
//
// by a Timer with a 0.5 sec delay
//
Timer.scheduledTimer(withTimeInterval: 0.5, repeats: false) { _ in
withAnimation {
self.scaleValue = 1.0
}
}
}) {
...
UPD 3:
While we wait for an official Apple update, one suitable solution for implementing the two events touchBegan and touchEnd is based on @average Joe's answer:
import SwiftUI
struct TouchGestureViewModifier: ViewModifier {
let minimumDistance: CGFloat
let touchBegan: () -> Void
let touchEnd: (Bool) -> Void
@State private var hasBegun = false
@State private var hasEnded = false
init(minimumDistance: CGFloat, touchBegan: @escaping () -> Void, touchEnd: @escaping (Bool) -> Void) {
self.minimumDistance = minimumDistance
self.touchBegan = touchBegan
self.touchEnd = touchEnd
}
private func isTooFar(_ translation: CGSize) -> Bool {
let distance = sqrt(pow(translation.width, 2) + pow(translation.height, 2))
return distance >= minimumDistance
}
func body(content: Content) -> some View {
content.gesture(DragGesture(minimumDistance: 0)
.onChanged { event in
guard !self.hasEnded else { return }
if self.hasBegun == false {
self.hasBegun = true
self.touchBegan()
} else if self.isTooFar(event.translation) {
self.hasEnded = true
self.touchEnd(false)
}
}
.onEnded { event in
if !self.hasEnded {
let success = !self.isTooFar(event.translation)
self.touchEnd(success)
}
self.hasBegun = false
self.hasEnded = false
}
)
}
}
extension View {
func onTouchGesture(minimumDistance: CGFloat = 20.0,
touchBegan: @escaping () -> Void,
touchEnd: @escaping (Bool) -> Void) -> some View {
modifier(TouchGestureViewModifier(minimumDistance: minimumDistance, touchBegan: touchBegan, touchEnd: touchEnd))
}
}
struct ScaleButtonStyle: ButtonStyle {
func makeBody(configuration: Self.Configuration) -> some View {
configuration.label
.scaleEffect(configuration.isPressed ? 2 : 1)
}
}
struct Test2View: View {
var body: some View {
Button(action: {}) {
Image("button1")
}.buttonStyle(ScaleButtonStyle())
}
}
Upgrading the code from the answer above for iOS 15 (though this is also available back to iOS 13).
The one-parameter form of the animation() modifier has now been formally deprecated, mostly because it caused all sorts of unexpected behaviors (for example in lazy stacks: LazyVStack/LazyHStack inside a ScrollView, or LazyVGrid), where a Button could animate unexpectedly (jumping) during scrolling.
public struct ScaleButtonStyle: ButtonStyle {
public init() {}
public func makeBody(configuration: Self.Configuration) -> some View {
configuration.label
.scaleEffect(configuration.isPressed ? 0.95 : 1)
.animation(.linear(duration: 0.2), value: configuration.isPressed)
.brightness(configuration.isPressed ? -0.05 : 0)
}
}
public extension ButtonStyle where Self == ScaleButtonStyle {
static var scale: ScaleButtonStyle {
ScaleButtonStyle()
}
}
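With that extension in place, the style reads naturally at the call site:
Button("Tap me") { print("tapped") }
    .buttonStyle(.scale)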
Yes, it looks like a bug, but after some experimenting I found that you can do it like this.
I've posted a demo at https://youtu.be/kw4EIOCp78g
struct TestView: View {
@State private var scaleValue = CGFloat(1)
var body: some View {
ZStack {
CustomButton(
touchBegan: {
withAnimation {
self.scaleValue = 2
}
},
touchEnd: {
withAnimation {
self.scaleValue = 1
}
}
){
Image("button1")
}.frame(width: 100, height: 100)
Image("button1").opacity(scaleValue > 1 ? 1 : 0).scaleEffect(self.scaleValue)
}
}
}
struct CustomButton<Content: View>: UIViewControllerRepresentable {
var content: Content
var touchBegan: () -> ()
var touchEnd: () -> ()
typealias UIViewControllerType = CustomButtonController<Content>
init(touchBegan: @escaping () -> (), touchEnd: @escaping () -> (), @ViewBuilder content: @escaping () -> Content) {
self.touchBegan = touchBegan
self.touchEnd = touchEnd
self.content = content()
}
func makeUIViewController(context: Context) -> UIViewControllerType {
CustomButtonController(rootView: self.content, touchBegan: touchBegan, touchEnd: touchEnd)
}
func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) {
}
}
class CustomButtonController<Content: View>: UIHostingController<Content> {
var touchBegan: () -> ()
var touchEnd: () -> ()
init(rootView: Content, touchBegan: @escaping () -> (), touchEnd: @escaping () -> ()) {
self.touchBegan = touchBegan
self.touchEnd = touchEnd
super.init(rootView: rootView)
self.view.isMultipleTouchEnabled = true
}
@objc required dynamic init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
super.touchesBegan(touches, with: event)
self.touchBegan()
}
override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent?) {
super.touchesCancelled(touches, with: event)
self.touchEnd()
}
//touchesEnded only works if the user moves their finger beyond the bounds of the image and releases
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
super.touchesEnded(touches, with: event)
self.touchEnd()
}
}
There is another strange thing: if we move and scale the second Image onto the first, it will not be shown without .frame(width: 100, height: 100).
A nice and clean SwiftUI solution:
@State private var scaleValue = CGFloat(1)
...
Image("button1")
.scaleEffect(self.scaleValue)
.onTouchGesture(
touchBegan: { withAnimation { self.scaleValue = 1.5 } },
touchEnd: { _ in withAnimation { self.scaleValue = 1.0 } }
)
However, you also need to add this code snippet to the project:
struct TouchGestureViewModifier: ViewModifier {
let touchBegan: () -> Void
let touchEnd: (Bool) -> Void
@State private var hasBegun = false
@State private var hasEnded = false
private func isTooFar(_ translation: CGSize) -> Bool {
let distance = sqrt(pow(translation.width, 2) + pow(translation.height, 2))
return distance >= 20.0
}
func body(content: Content) -> some View {
content.gesture(DragGesture(minimumDistance: 0)
.onChanged { event in
guard !self.hasEnded else { return }
if self.hasBegun == false {
self.hasBegun = true
self.touchBegan()
} else if self.isTooFar(event.translation) {
self.hasEnded = true
self.touchEnd(false)
}
}
.onEnded { event in
if !self.hasEnded {
let success = !self.isTooFar(event.translation)
self.touchEnd(success)
}
self.hasBegun = false
self.hasEnded = false
})
}
}
extension View {
func onTouchGesture(touchBegan: @escaping () -> Void,
touchEnd: @escaping (Bool) -> Void) -> some View {
modifier(TouchGestureViewModifier(touchBegan: touchBegan, touchEnd: touchEnd))
}
}
OK, I think I might have a decent solution here. GIST here
I've put together a bunch of things to make this work. First, an AnimatableModifier that observes whether an animation has ended. All thanks to avanderlee.
/// An animatable modifier that is used for observing animations for a given animatable value.
public struct AnimationCompletionObserverModifier<Value>: AnimatableModifier where Value: VectorArithmetic {
/// While animating, SwiftUI changes the old input value to the new target value using this property. This value is set to the old value until the animation completes.
public var animatableData: Value {
didSet {
notifyCompletionIfFinished()
}
}
/// The target value for which we're observing. This value is directly set once the animation starts. During animation, `animatableData` will hold the oldValue and is only updated to the target value once the animation completes.
private var targetValue: Value
/// The completion callback which is called once the animation completes.
private var completion: () -> Void
init(observedValue: Value, completion: @escaping () -> Void) {
self.completion = completion
self.animatableData = observedValue
targetValue = observedValue
}
/// Verifies whether the current animation is finished and calls the completion callback if true.
private func notifyCompletionIfFinished() {
guard animatableData == targetValue else { return }
/// Dispatching is needed to take the next runloop for the completion callback.
/// This prevents errors like "Modifying state during view update, this will cause undefined behavior."
DispatchQueue.main.async {
self.completion()
}
}
public func body(content: Content) -> some View {
/// We're not really modifying the view so we can directly return the original input value.
return content
}
}
public extension View {
/// Calls the completion handler whenever an animation on the given value completes.
/// - Parameters:
/// - value: The value to observe for animations.
/// - completion: The completion callback to call once the animation completes.
/// - Returns: A modified `View` instance with the observer attached.
func onAnimationCompleted<Value: VectorArithmetic>(for value: Value, completion: @escaping () -> Void) -> ModifiedContent<Self, AnimationCompletionObserverModifier<Value>> {
return modifier(AnimationCompletionObserverModifier(observedValue: value, completion: completion))
}
}
Now that we can track the end of an animation, another view modifier takes care of tracking the end of the zoom-out animation and starting a new zoom-in animation, using a bunch of Booleans to track the animation state. Forgive the naming.
struct ZoomInOutOnTapModifier: ViewModifier {
var destinationScaleFactor: CGFloat
var duration: TimeInterval
init(duration: TimeInterval = 0.3,
destinationScaleFactor: CGFloat = 1.2) {
self.duration = duration
self.destinationScaleFactor = destinationScaleFactor
}
@State var scale: CGFloat = 1
@State var secondHalfAnimationStarted = false
@State var animationCompleted = false
func body(content: Content) -> some View {
content
.scaleEffect(scale)
.simultaneousGesture(DragGesture(minimumDistance: 0.0, coordinateSpace: .global)
.onChanged({ _ in
animationCompleted = true
withAnimation(.linear(duration: duration)) {
scale = destinationScaleFactor
}
})
.onEnded({ _ in
withAnimation(.linear(duration: duration)) {
scale = 1
}
secondHalfAnimationStarted = true
})
)
.onAnimationCompleted(for: scale) {
if scale == 1 {
    secondHalfAnimationStarted = false
    animationCompleted = true
} else if scale == destinationScaleFactor {
    animationCompleted = false
    secondHalfAnimationStarted = true
}
if !secondHalfAnimationStarted {
withAnimation(.linear(duration: duration)) {
scale = 1
}
}
}
}
}
public extension View {
func addingZoomOnTap(duration: TimeInterval = 0.3, destinationScaleFactor: CGFloat = 1.2) -> some View {
modifier(ZoomInOutOnTapModifier(duration: duration, destinationScaleFactor: destinationScaleFactor))
}
}
All put together:
PlaygroundPage.current.setLiveView(
Button {
print("Button tapped")
} label: {
Text("Tap me")
.font(.system(size: 20))
.foregroundColor(.white)
.padding()
.background(Capsule()
.fill(Color.black))
}
.addingZoomOnTap()
.frame(width: 300, height: 300)
)
Let me know if improvements can be made.
EDIT:
In case you want the button to stay in the scaled state until the user lets go of the button (touchUp), the ViewModifier becomes much simpler.
struct ZoomInOutOnTapModifier: ViewModifier {
var destinationScaleFactor: CGFloat
var duration: TimeInterval
init(duration: TimeInterval = 0.3,
destinationScaleFactor: CGFloat = 1.2) {
self.duration = duration
self.destinationScaleFactor = destinationScaleFactor
}
@State var scale: CGFloat = 1
func body(content: Content) -> some View {
content
.scaleEffect(scale)
.simultaneousGesture(DragGesture(minimumDistance: 0.0, coordinateSpace: .global)
.onChanged({ _ in
withAnimation(.linear(duration: duration)) {
scale = destinationScaleFactor
}
})
.onEnded({ _ in
withAnimation(.linear(duration: duration)) {
scale = 1
}
})
)
}
}
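The addingZoomOnTap extension from earlier works unchanged with this simpler version, so usage stays the same:
Text("Tap me")
    .addingZoomOnTap(duration: 0.2, destinationScaleFactor: 1.1)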
I'm creating a custom stepper control in SwiftUI, and I'm trying to replicate the accelerating value change behavior of the built-in control. In a SwiftUI Stepper, long pressing on "+" or "-" will keep increasing/decreasing the value with the rate of change getting faster the longer you hold the button.
I can create the visual effect of holding down the button with the following:
struct PressBox: View {
@GestureState var pressed = false
@State var value = 0
var body: some View {
ZStack {
Rectangle()
.fill(pressed ? Color.blue : Color.green)
.frame(width: 70, height: 50)
.gesture(LongPressGesture(minimumDuration: .infinity)
.updating($pressed) { value, state, transaction in
state = value
}
.onChanged { _ in
self.value += 1
}
)
Text("\(value)")
.foregroundColor(.white)
}
}
}
This only increments the value once. Adding a timer publisher to the onChanged modifier for the gesture like this:
let timer = Timer.publish(every: 0.5, on: .main, in: .common)
@State var cancellable: AnyCancellable? = nil
...
.onChanged { _ in
self.cancellable = self.timer.connect() as? AnyCancellable
}
will replicate the changing values, but since the gesture never completes successfully (onEnded will never be called), there's no way to stop the timer. Gestures don't have an onCancelled modifier.
I also tried doing this with a TapGesture which would work for detecting the end of the gesture, but I don't see a way to detect the start of the gesture. This code:
.gesture(TapGesture()
.updating($pressed) { value, state, transaction in
state = value
}
)
generates an error on $pressed:
Cannot convert value of type 'GestureState' to expected argument type 'GestureState<_>'
Is there a way to replicate the behavior without falling back to UIKit?
You'd need an onTouchDown event on the view to start a timer and an onTouchUp event to stop it. SwiftUI doesn't provide a touch down event at the moment, so I think the best way to get what you want is to use the DragGesture this way:
import SwiftUI
class ViewModel: ObservableObject {
private static let updateSpeedThresholds = (maxUpdateSpeed: TimeInterval(0.05), minUpdateSpeed: TimeInterval(0.3))
private static let maxSpeedReachedInNumberOfSeconds = TimeInterval(2.5)
@Published var val: Int = 0
@Published var started = false
private var timer: Timer?
private var currentUpdateSpeed = ViewModel.updateSpeedThresholds.minUpdateSpeed
private var lastValueChangingDate: Date?
private var startDate: Date?
func start() {
if !started {
started = true
val = 0
startDate = Date()
startTimer()
}
}
func stop() {
timer?.invalidate()
currentUpdateSpeed = Self.updateSpeedThresholds.minUpdateSpeed
lastValueChangingDate = nil
started = false
}
private func startTimer() {
timer = Timer.scheduledTimer(withTimeInterval: Self.updateSpeedThresholds.maxUpdateSpeed, repeats: false) {[unowned self] _ in
self.updateVal()
self.updateSpeed()
self.startTimer()
}
}
private func updateVal() {
if self.lastValueChangingDate == nil || Date().timeIntervalSince(self.lastValueChangingDate!) >= self.currentUpdateSpeed {
self.lastValueChangingDate = Date()
self.val += 1
}
}
private func updateSpeed() {
if self.currentUpdateSpeed < Self.updateSpeedThresholds.maxUpdateSpeed {
return
}
let timePassed = Date().timeIntervalSince(self.startDate!)
self.currentUpdateSpeed = timePassed * (Self.updateSpeedThresholds.maxUpdateSpeed - Self.updateSpeedThresholds.minUpdateSpeed)/Self.maxSpeedReachedInNumberOfSeconds + Self.updateSpeedThresholds.minUpdateSpeed
}
}
struct ContentView: View {
@ObservedObject var viewModel: ViewModel
var body: some View {
ZStack {
Rectangle()
.fill(viewModel.started ? Color.blue : Color.green)
.frame(width: 70, height: 50)
.gesture(DragGesture(minimumDistance: 0)
.onChanged { _ in
self.viewModel.start()
}
.onEnded { _ in
self.viewModel.stop()
}
)
Text("\(viewModel.val)")
.foregroundColor(.white)
}
}
}
#if DEBUG
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView(viewModel: ViewModel())
}
}
#endif
Let me know if I got what you wanted or whether I can improve my answer somehow.
For anyone attempting something similar, here's a slightly different take on superpuccio's approach. The API for users of the type is a bit more straightforward, and it minimizes the number of timer fires as the speed ramps up.
struct TimerBox: View {
@Binding var value: Int
@State private var isRunning = false
@State private var startDate: Date? = nil
@State private var timer: Timer? = nil
private static let thresholds = (slow: TimeInterval(0.3), fast: TimeInterval(0.05))
private static let timeToMax = TimeInterval(2.5)
var body: some View {
ZStack {
Rectangle()
.fill(isRunning ? Color.blue : Color.green)
.frame(width: 70, height: 50)
.gesture(DragGesture(minimumDistance: 0)
.onChanged { _ in
self.startRunning()
}
.onEnded { _ in
self.stopRunning()
}
)
Text("\(value)")
.foregroundColor(.white)
}
}
private func startRunning() {
guard isRunning == false else { return }
isRunning = true
startDate = Date()
timer = Timer.scheduledTimer(withTimeInterval: Self.thresholds.slow, repeats: true, block: timerFired)
}
private func timerFired(timer: Timer) {
guard let startDate = self.startDate else { return }
self.value += 1
let timePassed = Date().timeIntervalSince(startDate)
let newSpeed = Self.thresholds.slow - timePassed * (Self.thresholds.slow - Self.thresholds.fast)/Self.timeToMax
let nextFire = Date().advanced(by: max(newSpeed, Self.thresholds.fast))
self.timer?.fireDate = nextFire
}
private func stopRunning() {
timer?.invalidate()
isRunning = false
}
}
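For reference, here is a sketch of how TimerBox might be embedded, since it takes a binding (the host view is my own example):
struct StepperDemo: View {
    @State private var count = 0
    var body: some View {
        VStack(spacing: 20) {
            Text("Count: \(count)")
            // Press and hold the box to ramp the count up.
            TimerBox(value: $count)
        }
    }
}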