Run a loop until UIPanGestureRecognizer ends - iOS

Hopefully this isn't too vague for the mods.
I want to make a user interaction similar to the volume control on some hi-fis, where rather than turning a dial through complete revolutions you turn it slightly left or right; the further you turn it, the faster the volume changes, and when you let go it springs back to the middle.
In my app I want to use a UIPanGestureRecognizer: as the user pans up from the middle the volume goes up, and the further from the middle they are, the faster the increase. When they pan below the mid point of the screen the volume goes down, again faster the further from the middle they are.
The area I'm stuck on is how to make this happen without locking up the UI. I can't just use the gesture recognizer's action selector, as this is only called when there is movement; for this interaction to work the user will often keep their finger in a single location while waiting for the right volume to be reached.
I feel like I want to set a loop running outside the gesture recognizer selector and have it monitor a class variable that gets updated when the gesture moves or ends. If I do this inside the gesture recognizer selector it will just keep running...
If this were an embedded system I would just set up some kind of interrupt-based polling to check what the input control was at and keep adding to the volume until it was back to the middle - I can't find the iOS equivalent here.
Suggestions would be welcome. Sorry, mods, if this is too vague - it's more of a framework methodology question than a specific code issue.
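To make the idea concrete, this is roughly the shape I'm imagining, if there is an iOS-friendly way to drive the periodic part (an untested sketch in modern Swift; CADisplayLink is just my guess at the polling mechanism, and names like currentOffset are placeholders): the pan handler only records the current vertical offset, and a periodic callback polls it to nudge the volume.
import UIKit

class VolumeViewController: UIViewController {
    private var currentOffset: CGFloat = 0   // written by the pan handler
    private var volume: Float = 0.5          // 0.0 ... 1.0
    private var displayLink: CADisplayLink?

    override func viewDidLoad() {
        super.viewDidLoad()
        let pan = UIPanGestureRecognizer(target: self, action: #selector(handlePan(_:)))
        view.addGestureRecognizer(pan)
    }

    @objc private func handlePan(_ pan: UIPanGestureRecognizer) {
        switch pan.state {
        case .began:
            // Start polling only while the finger is down.
            displayLink = CADisplayLink(target: self, selector: #selector(tick))
            displayLink?.add(to: .main, forMode: .common)
        case .changed:
            currentOffset = pan.translation(in: view).y
        case .ended, .cancelled, .failed:
            currentOffset = 0
            displayLink?.invalidate()
            displayLink = nil
        default:
            break
        }
    }

    @objc private func tick() {
        // Panning up (negative y) raises the volume; the further from the start, the faster.
        let delta = Float(-currentOffset) / 10_000
        volume = min(max(volume + delta, 0), 1)
    }
}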

Interesting question! I wrote a sample for you which should be what you want:
Objective-C code:
#import "ViewController.h"
@interface ViewController ()
@property float theValue;
@property NSTimer *timer;
@property BOOL needRecord;
@property UIView *dot;
@end
@implementation ViewController
- (void)viewDidLoad {
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
self.needRecord = NO;
self.theValue = 0;
UIView *circle = [[UIView alloc] initWithFrame:CGRectMake(0, 0, 50, 50)];
circle.layer.borderWidth = 3;
circle.layer.borderColor = [UIColor redColor].CGColor;
circle.layer.cornerRadius = 25;
circle.center = self.view.center;
[self.view addSubview:circle];
self.dot = [[UIView alloc] initWithFrame:CGRectMake(0, 0, 40, 40)];
self.dot.backgroundColor = [UIColor redColor];
self.dot.layer.cornerRadius = 20;
self.dot.center = self.view.center;
[self.view addSubview:self.dot];
UIPanGestureRecognizer *pan = [[UIPanGestureRecognizer alloc] initWithTarget:self action:@selector(panHandle:)];
[self.dot addGestureRecognizer:pan];
self.timer = [NSTimer scheduledTimerWithTimeInterval:0.1 target:self selector:@selector(timerFire) userInfo:nil repeats:YES];
}
-(void)panHandle:(UIPanGestureRecognizer *)pan{
CGPoint pt = [pan translationInView:self.view];
// NSLog(@"pt.y = %f", pt.y);
switch (pan.state) {
case UIGestureRecognizerStateBegan:
[self draggingStart];
break;
case UIGestureRecognizerStateChanged:
self.dot.center = CGPointMake(self.view.center.x, self.view.center.y + pt.y);
break;
case UIGestureRecognizerStateEnded:
[self draggingEnded];
break;
default:
break;
}
}
-(void)draggingStart{
self.needRecord = YES;
}
-(void)draggingEnded{
self.needRecord = NO;
[UIView animateWithDuration:0.5 animations:^{
self.dot.center = self.view.center;
}];
}
-(void)timerFire{
if (self.needRecord) {
float distance = self.dot.center.y - self.view.center.y;
// NSLog(@"distance = %f", distance);
self.theValue -= distance/1000;
NSLog(@"theValue = %f", self.theValue);
}
}
@end
I'm learning Swift right now, so if you need, this is Swift code:
class ViewController: UIViewController {
var lbInfo:UILabel?;
var theValue:Float?;
var timer:NSTimer?;
var needRecord:Bool?;
var circle:UIView?;
var dot:UIView?;
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
needRecord = false;
theValue = 0;
lbInfo = UILabel(frame: CGRect(x: 50, y: 50, width: UIScreen.mainScreen().bounds.width-100, height: 30));
lbInfo!.textAlignment = NSTextAlignment.Center;
lbInfo!.text = "Look at here!";
self.view.addSubview(lbInfo!);
circle = UIView(frame: CGRect(x: 0, y: 0, width: 50, height: 50));
circle!.layer.borderWidth = 3;
circle!.layer.borderColor = UIColor.redColor().CGColor;
circle!.layer.cornerRadius = 25;
circle!.center = self.view.center;
self.view.addSubview(circle!);
dot = UIView(frame: CGRect(x: 0, y: 0, width: 40, height: 40));
dot!.backgroundColor = UIColor.redColor();
dot!.layer.cornerRadius = 20;
dot!.center = self.view.center;
self.view.addSubview(dot!);
let pan = UIPanGestureRecognizer(target: self, action: #selector(ViewController.panhandler));
dot!.addGestureRecognizer(pan);
timer = NSTimer.scheduledTimerWithTimeInterval(0.1, target: self, selector: #selector(ViewController.timerFire), userInfo: nil, repeats: true)
}
func panhandler(pan: UIPanGestureRecognizer) -> Void {
let pt = pan.translationInView(self.view);
switch pan.state {
case UIGestureRecognizerState.Began:
draggingStart();
case UIGestureRecognizerState.Changed:
self.dot!.center = CGPoint(x: self.view.center.x, y: self.view.center.y + pt.y);
case UIGestureRecognizerState.Ended:
draggingEnded();
default:
break;
}
}
func draggingStart() -> Void {
needRecord = true;
}
func draggingEnded() -> Void {
needRecord = false;
UIView.animateWithDuration(0.1, animations: {
self.dot!.center = self.view.center;
});
}
@objc func timerFire() -> Void {
if(needRecord!){
let distance:Float = Float(self.dot!.center.y) - Float(self.view.center.y);
theValue! -= distance/1000;
self.lbInfo!.text = String(format: "%.2f", theValue!);
}
}
}
Hope it helps.
If you still need some advice, just leave a comment here and I will check it later.

This sounds like a fun challenge. I don't totally understand the hi-fi thing, but what I'm picturing is a circular knob with a small dot at 9 o'clock, almost at the edge. When you turn the knob to the right the dot moves towards 12 o'clock and the volume increases, accelerating faster the further the dot is from 9 o'clock. It continues to increase as long as the dot is above 9; only the acceleration of the increase changes.
When you turn left, the dot goes towards 6 o'clock, the volume decreases, and the acceleration depends on the radial distance from 9 o'clock. If this assumption is correct, I think the following would work.
I would solve this with a little trigonometry. To get the acceleration, you need the angle from the 9 o'clock axis (the negative x axis). A positive angle increases the volume, a negative angle decreases it, and the acceleration depends on the degree of rotation. This angle will also give you a transform that you can apply to the view to change the dot's place. I don't think this will take anything too fancy code-wise. In this case, I have made the maximum rotation 90 degrees, or pi/2 radians. If you can actually turn the knob more than that, it would take some code changes.
var volume: Double = 0
var maxVolume: Double = 100
var increasing = false
var multiplier: Double = 0
var cartesianTransform = CGAffineTransform()
let knobViewFrame = CGRect(x: 50, y: 50, width: 100, height: 100)
let knobRadius: CGFloat = 45
let knob = UIView()
var timer : NSTimer?
func setTransform() {
self.cartesianTransform = CGAffineTransform(a: 1/knobRadius, b: 0, c: 0, d: -1/knobRadius, tx: knobViewFrame.width/2, ty: knobViewFrame.height * 1.5)
// admittedly, I always have to play with these things to get them right, so there may be some errors. This transform should turn the view into a plane with (0,0) at the center, and the knob's circle at (-1,0)
}
func panKnob(pan: UIPanGestureRecognizer) {
let pointInCartesian = CGPointApplyAffineTransform(pan.locationInView(pan.view!), cartesianTransform)
if pan.state == .Began {
increasing = pointInCartesian.y > 0
timer = NSTimer.scheduledTimerWithTimeInterval(0.1, target: self, selector: #selector(increaseVolume), userInfo: nil, repeats: true)
}
let arctangent = CGFloat(M_PI) - atan2(pointInCartesian.y, pointInCartesian.x)
let maxAngle = increasing ? CGFloat(M_PI)/2 : -CGFloat(M_PI)/2
var angle: CGFloat
if increasing {
angle = arctangent > maxAngle ? maxAngle : arctangent
} else {
angle = arctangent < maxAngle ? maxAngle : arctangent
}
knob.transform = CGAffineTransformMakeRotation(angle)
self.multiplier = Double(angle) * 10
if pan.state == .Ended || pan.state == .Cancelled || pan.state == .Failed {
timer?.invalidate()
UIView.animateWithDuration(0.75, delay: 0, options: .CurveEaseIn, animations: {self.knob.transform = CGAffineTransformIdentity}, completion: nil)
}
}
func increaseVolume() {
let newVolume = volume + multiplier
volume = newVolume > maxVolume ? maxVolume : (newVolume < 0 ? 0 : newVolume)
if volume == maxVolume || volume == 0 || multiplier == 0 {
timer?.invalidate()
}
}
I haven't tested the above, but this seemed like a cool puzzle. The multiplier only changes when the angle changes, and the volume keeps adding the multiplier while the timer is valid. If you want the volume to accelerate continuously without the angle changing, move the multiplier change to the timer's selector, and keep the angle as a class variable so you know how fast to accelerate.
edit: You could probably do it without the transform, by just getting the delta between the dot and the locationInView.
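If it helps, here's a rough sketch of the continuous-acceleration variant mentioned above (untested; it assumes panKnob stores the latest rotation in an `angle` property, and that `multiplier` is reset to 0 whenever a new pan begins):
// Keep the latest rotation as a property (panKnob would set it instead of using a local).
var angle: CGFloat = 0

func increaseVolume() {
    // Grow the multiplier on every tick, so the volume keeps accelerating
    // even while the finger (and therefore the angle) stays put.
    multiplier += Double(angle) * 0.5
    let newVolume = volume + multiplier
    volume = min(max(newVolume, 0), maxVolume)
    if volume == maxVolume || volume == 0 {
        timer?.invalidate()
    }
}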

Related

How to convert CATransform3DTranslate to CGAffineTransform so it can Mimic a Carousel View

My question pertains to how to mimic the carousel view in this YouTube video using only a UIView, not its layer or a CALayer, which means actually transforming the UIViews themselves.
I found a Stack Overflow question that is actually able to convert a
CATransform3D into a CGAffineTransform. That was written by some genius here as an answer, but my problem is a little unique.
The animation you see below is created using a CALayer. I need to create this same animation but by transforming the UIView instead of its layer.
What it's Supposed to look like:
Code (Creates animation using Layers):
This takes an image card, which is a CALayer() with an image attached to it, and transforms it, which places it in the carousel of images.
Note: turnCarousel() is also called when the user pans, which moves/animates the carousel.
let transformLayer = CATransformLayer()
func turnCarousel() {
guard let transformSubLayers = transformLayer.sublayers else {return}
let segmentForImageCard = CGFloat(360 / transformSubLayers.count)
var angleOffset = currentAngle
for layer in transformSubLayers {
var transform = CATransform3DIdentity
transform.m34 = -1 / 500
transform = CATransform3DRotate(transform, degreeToRadians(deg: angleOffset), 0, 1, 0)
transform = CATransform3DTranslate(transform, 0, 0, 175)
CATransaction.setAnimationDuration(0)
layer.transform = transform
angleOffset += segmentForImageCard
}
}
What It Currently Looks Like:
So basically it's close, but it seems as though there is a scaling issue between the cards that are supposed to be seen at the front and the cards that are at the back of the carousel.
For this, what I did was use a UIImageView as the base view for the carousel and then add more UIImageViews as cards to it. So now we are trying to do a transformation on a UIImageView/UIView.
Code:
var carouselTestView = UIImageView()
func turnCarouselTestCarousel() {
let segmentForImageCard = CGFloat(360 / carouselTestView.subviews.count)
var angleOffset = currentAngleTestView
for subview in carouselTestView.subviews {
var transform2 = CATransform3DIdentity
transform2.m34 = -1 / 500
transform2 = CATransform3DRotate(transform2, degreeToRadians(deg: angleOffset), 0, 1, 0)
transform2 = CATransform3DTranslate(transform2, 0, 0, 175)
CATransaction.setAnimationDuration(0)
// m13, m23, m33, m43 are not important since the destination is a flat XY plane.
// m31, m32 are not important since they would multiply with z = 0.
// m34 is zeroed here, so that neutralizes foreshortening. We can't avoid that.
// m44 is implicitly 1 as CGAffineTransform's m33.
let fullTransform: CATransform3D = transform2
let affine = CGAffineTransform(a: fullTransform.m11, b: fullTransform.m12, c: fullTransform.m21, d: fullTransform.m22, tx: fullTransform.m41, ty: fullTransform.m42)
subview.transform = affine
angleOffset += segmentForImageCard
}
}
The sub-images that actually make up the carousel are added with this function, which simply goes through a for loop of images named 1...6 in my assets folder.
Code:
func CreateCarousel() {
carouselTestView.frame.size = CGSize(width: self.view.frame.width, height: self.view.frame.height / 2.9)
carouselTestView.center = CGPoint(x: self.view.frame.width * 0.5, y: self.view.frame.height * 0.5)
carouselTestView.alpha = 1.0
carouselTestView.backgroundColor = UIColor.clear
carouselTestView.isUserInteractionEnabled = true
self.view.insertSubview(carouselTestView, at: 0)
for i in 1 ... 6 {
addImageCardTestCarousel(name: "\(i)")
}
// Set the carousel for the first time. So that now we can see it like an actual carousel animation
turnCarouselTestCarousel()
let panGestureRecognizerTestCarousel = UIPanGestureRecognizer(target: self, action: #selector(self.performPanActionTestCarousel(recognizer:)))
panGestureRecognizerTestCarousel.delegate = self
carouselTestView.addGestureRecognizer(panGestureRecognizerTestCarousel)
}
The addImageCardTestCarousel function is here:
Code:
func addImageCardTestCarousel(name: String) {
let imageCardSize = CGSize(width: carouselTestView.frame.width / 2, height: carouselTestView.frame.height)
let cardPanel = UIImageView()
cardPanel.frame.size = CGSize(width: imageCardSize.width, height: imageCardSize.height)
cardPanel.frame.origin = CGPoint(x: carouselTestView.frame.size.width / 2 - imageCardSize.width / 2, y: carouselTestView.frame.size.height / 2 - imageCardSize.height / 2)
guard let imageCardImage = UIImage(named: name) else {return}
cardPanel.image = imageCardImage
cardPanel.contentMode = .scaleAspectFill
cardPanel.layer.masksToBounds = true
cardPanel.layer.borderColor = UIColor.white.cgColor
cardPanel.layer.borderWidth = 1
cardPanel.layer.cornerRadius = cardPanel.frame.height / 50
carouselTestView.addSubview(cardPanel)
}
Purpose:
The purpose of this is that I want to build a UI that can place UIViews on the rotating cards you see, and a CALayer cannot add a UIView as a subview - it can only add the UIView's layer to its own layer. So to solve this problem I need to actually achieve this animation with UIViews, not CALayers.
I solved it. The views that appear to be behind the front-most view were actually grabbing all the touches: even if you touch a card right at the front, the back card will prevent touches on the front card. So I made a function that calculates which views are in the front, then disables and enables touches for them accordingly. It's like when two cards get stacked on top of each other - the card at the back would stop the card at the front from receiving user interaction.
Code:
func DetermineFrontViews(view subview: UIView, angle angleOffset: CGFloat) {
let looped = Int(angleOffset / 360) // must round down to Int()
let loopSubtractingReset = CGFloat(360 * looped) // multiply 360 how ever many times we have looped
let finalangle = angleOffset - loopSubtractingReset
if (finalangle >= -70 && finalangle <= 70) || (finalangle >= 290) || (finalangle <= -260) {
print("In front of view")
if subview.isUserInteractionEnabled == false {
subview.isUserInteractionEnabled = true
}
} else {
print("Back of view")
if subview.isUserInteractionEnabled == true {
subview.isUserInteractionEnabled = false
}
}
}
I added this to the turn function to see if it could keep track of the first card being either in the back of the carousel or the front.
if subview.layer.name == "1" {
DetermineFrontViews(view: subview, angle: angleOffset)
}

iOS -How to have a separate UIView take control of a UISlider's thumbRect

I have a button that I press and a custom alert appears with a normal UISlider inside of it. There are some other views and labels that I have that show the distance etc. that are above and behind the slider. What happens is the thumbRect of the slider doesn't slide too well when touched. I have to be very accurate when trying to slide it and it seems buggy. What I want to do is add a clear UIView in front of it (and the other views and labels above it) and have the clear UIView take control of the slider using a UIGestureRecognizer.
Here's the setup so far. The clearView is what I want to use to take control of the slider. Right now it's behind everything, and I colored the clearView red just so it's visible for the example.
bgView.addSubview(slider) // slider added to bgView
let trackRect = slider.trackRect(forBounds: slider.frame)
let thumbRect = slider.thumbRect(forBounds: slider.bounds, trackRect: trackRect, value: slider.value)
milesLabel.center = CGPoint(x: thumbRect.midX, y: slider.frame.origin.y - 55)
bgView.addSubview(milesLabel) // milesLabel added to bgView
// ** all the other views you see are added to the bgView and aligned with the thumbRect's center **
let milesLabelRect = bgView.convert(milesLabel.frame, to: self.view)
let sliderRect = bgView.convert(slider.frame, to: self.view)
let topOfMilesLabel = milesLabelRect.origin.y
let bottomOfSlider = sliderRect.maxY
let distance = bottomOfSlider - topOfMilesLabel
clearView.frame = CGRect(x: milesLabel.frame.minX, y: milesLabel.frame.minY, width: milesLabel.frame.width, height: distance)
bgView.addSubview(clearView) // clearView added to bgView
bgView.insertSubview(clearView, at: 0)
When I slide the slider everything successfully slides with it including the clearView but of course the thumbRect is still in control of everything.
@objc func onSliderValChanged(sender: UISlider, event: UIEvent) {
slider.value = round(sender.value)
UIView.animate(withDuration: 0.25, animations: {
self.slider.layoutIfNeeded()
let trackRect = sender.trackRect(forBounds: sender.frame)
let thumbRect = sender.thumbRect(forBounds: sender.bounds, trackRect: trackRect, value: sender.value)
self.milesLabel.center = CGPoint(x: thumbRect.midX, y: sender.frame.origin.y - 55)
// ** all the other views are aligned with the thumbRect's center as it slides **
self.clearView.frame.origin = self.milesLabel.frame.origin
})
}
The idea is to move the clearView to the front of everything and use that to control the thumbRect (don't forget it's only red for the example).
bgView.bringSubviewToFront(clearView)
// clearView.backgroundColor = UIColor.clear
I tried to use a UIPanGestureRecognizer and a UILongPressGestureRecognizer to have the clearView take control of the slider, but when I slide the slider only the thumbRect slides; the clearView stays still. Look where the clearView (which is red) is and look where the thumbRect is after I slid it to the 10 mile point.
// separately tried a UILongPressGestureRecognizer too
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(draggedView(_:)))
clearView.isUserInteractionEnabled = true
clearView.addGestureRecognizer(panGesture)
@objc func draggedView(_ sender: UIPanGestureRecognizer) {
if slider.isHighlighted { return }
let point = sender.location(in: slider)
let percentage = Float(point.x / slider.frame.width)
let delta = percentage * (slider.maximumValue - slider.minimumValue)
let value = slider.minimumValue + delta
slider.setValue(value, animated: true)
}
If I play around with it enough the clearView sometimes slides with the thumbRect, but they definitely aren't in alignment and it's very buggy. I also lost control of all the other views that were aligned with the thumbRect.
How can I pass control from the slider's thumbRect to the clearView?
You might have your mind set on doing it this way, which is okay. But I have a fast alternative for you that might suit your needs.
If you subclass your playback slider and override the point() method you can increase the surface area of the slider's thumb.
class CustomPlaybackSlider: UISlider {
// Will increase target surface area for thumb.
override public func point(inside point: CGPoint, with event: UIEvent?) -> Bool {
var bounds: CGRect = self.bounds
// Set the inset to a negative value by how much you want the surface area to increase by.
bounds = bounds.insetBy(dx: -10, dy: -15)
return bounds.contains(point)
}
}
Might simplify things for you. Although, the only downside of this approach is that the touch surface area extends equally left/right by your dx value and equally up/down by your dy value.
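If you only want the extra area in certain directions, you could build the expanded rect by hand inside the same subclass instead of using insetBy (a quick sketch; the numbers are arbitrary):
// Sketch: extend the touch area upward and to the sides, but leave the bottom edge alone.
override func point(inside point: CGPoint, with event: UIEvent?) -> Bool {
    let expanded = CGRect(x: bounds.minX - 15,
                          y: bounds.minY - 20,
                          width: bounds.width + 30,
                          height: bounds.height + 20)
    return expanded.contains(point)
}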
Hope this helps.
I found the key to the problem here in this answer. @PaulaHasstenteufel said:
// also remember to call valueChanged if there's any
// custom behaviour going on there and pass the slider
// variable as the parameter, as indicated below
self.sliderValueChanged(slider)
What I had to do was:
1- Use a UIPanGestureRecognizer.
2- Comment out the UISlider's target action.
3- Take the code out of the target action's selector method, @objc func onSliderValChanged(sender: UISlider, event: UIEvent) { }.
4- Add that code to a new function.
5- Call the function from step 4 at the bottom of the UIPanGestureRecognizer's method.
1- Use the UIPanGestureRecognizer and make sure userInteraction is enabled for the UIView
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(draggedView(_:)))
clearView.isUserInteractionEnabled = true
clearView.addGestureRecognizer(panGesture)
2- For the slider, I commented out its addTarget for the .valueChanged event
let slider: UISlider = {
let slider = UISlider()
slider.translatesAutoresizingMaskIntoConstraints = false
slider.minimumValue = 1
slider.maximumValue = 5
slider.value = 1
slider.isContinuous = true
slider.maximumTrackTintColor = UIColor(white: 0, alpha: 0.3)
// ** COMMENT THIS OUT **
//slider.addTarget(self, action: #selector(onSliderValChanged(sender:event:)), for: .valueChanged)
return slider
}()
3- Take the code from the selector method that the target action above was using, and instead insert that code in step 4
@objc func onSliderValChanged(sender: UISlider, event: UIEvent) {
/*
use this code in step-4 instead
slider.value = round(sender.value)
UIView.animate(withDuration: 0.25, animations: {
self.slider.layoutIfNeeded()
let trackRect = sender.trackRect(forBounds: sender.frame)
let thumbRect = sender.thumbRect(forBounds: sender.bounds, trackRect: trackRect, value: sender.value)
self.milesLabel.center = CGPoint(x: thumbRect.midX, y: sender.frame.origin.y - 55)
// ** all the other views are aligned with the thumbRect's center as it slides **
self.clearView.frame.origin = self.milesLabel.frame.origin
})
*/
}
4- Create a new method and insert the code from step-3
func sliderValueChanged(_ sender: UISlider, value: Float) {
// round the value for smooth sliding
sender.value = round(value)
UIView.animate(withDuration: 0.25, animations: {
self.slider.layoutIfNeeded()
let trackRect = sender.trackRect(forBounds: sender.frame)
let thumbRect = sender.thumbRect(forBounds: sender.bounds, trackRect: trackRect, value: sender.value)
self.milesLabel.center = CGPoint(x: thumbRect.midX, y: sender.frame.origin.y - 55)
// ** all the other views are aligned with the thumbRect's center as it slides **
self.clearView.frame.origin = self.milesLabel.frame.origin
})
}
5- I used the same code that I used for the UIGestureRecognizer but commented out the setValue function and instead used the function from step 4
@objc fileprivate func draggedView(_ sender: UIPanGestureRecognizer) {
if slider.isHighlighted { return }
let point = sender.location(in: slider)
let percentage = Float(point.x / slider.bounds.size.width)
let delta = percentage * (slider.maximumValue - slider.minimumValue)
let value = slider.minimumValue + delta
// I had to comment this out because the sliding was too abrupt
//slider.setValue(value, animated: true)
// ** CALLING THIS FUNCTION FROM STEP-3 IS THE KEY TO GET THE UIView TO CONTROL THE SLIDER **
sliderValueChanged(slider, value: value)
}

can't access object within UIPanGestureRecognizer

I am doing a bezier animation with programmatically generated objects. The number of objects could be bigger than 100. These objects are also responsive to touch.
The animation works fine. Now I would like to make some objects drag-able.
However, I can't access the individual objects in the UIPanGestureRecognizer function. I assume I'm doing something wrong with class/subclass calling, but can't figure out what.
The tutorials I looked into had IBOutlets or dedicated class variables for every animated object on screen. I have a potentially high number of objects that are generated within the for loop.
What approach do you suggest?
func panning(pan: UIPanGestureRecognizer) {
self.view.bringSubviewToFront(pan.view!)
var translation = pan.translationInView(self.view)
pan.view.center = CGPointMake(pan.view.center.x + translation.x, pan.view.center.y + translation.y)
pan.setTranslation(CGPointZero, inView: self.view)
let panGesture = UIPanGestureRecognizer(target: self, action: "panning:");
}
}
.. within viewDidLoad the function gets called:
// loop from 0 to 50
for i in 0...50 {
// create a square object
let square = UIView()
square.frame = CGRect(x: 55, y: 300, width: 40, height: 40)
square.backgroundColor = UIColor.redColor()
self.view.addSubview(square)
let recognizer = UIPanGestureRecognizer(target: self, action: "panning:");
recognizer.delegate = self;
square.addGestureRecognizer(recognizer)
panGesture.delegate = self;
Thanks to the feedback from Rob, here's an updated version:
I don't really know how to add the AllowUserInteraction option to a CAKeyframeAnimation (Rob's answer 3a), as it does not have an "options" field (I'm a noob).
Stopping the animation for the object being moved (Rob's answer 3b) completely eludes me, but it's definitely something necessary here.
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
srand48(Int(NSDate().timeIntervalSince1970))
// loop from 0 to 5
for i in 0...5 {
// create a square
let square = UIView()
square.frame = CGRect(x: 55, y: 300, width: 40, height: 40)
square.backgroundColor = UIColor.redColor()
self.view.addSubview(square)
//square.userInteractionEnabled = true;
let recognizer = UIPanGestureRecognizer(target: self, action: "panning:");
square.addGestureRecognizer(recognizer)
// randomly create a value between 0.0 and 150.0
let randomYOffset = CGFloat( drand48() * 150)
// for every y-value on the bezier curve
// add our random y offset so that each individual animation
// will appear at a different y-position
let path = UIBezierPath()
path.moveToPoint(CGPoint(x: -16,y: 239 + randomYOffset))
path.addCurveToPoint(CGPoint(x: 375, y: 239 + randomYOffset), controlPoint1: CGPoint(x: 136, y: 373 + randomYOffset), controlPoint2: CGPoint(x: 178, y: 110 + randomYOffset))
// create the animation
let anim = CAKeyframeAnimation(keyPath: "position")
anim.path = path.CGPath
anim.rotationMode = kCAAnimationRotateAuto
anim.repeatCount = Float.infinity
//anim.duration = 5.0
// each square will take between 4.0 and 8.0 seconds
// to complete one animation loop
anim.duration = 4.0 + 3 * drand48()
// stagger each animation by a random value
// `290` was chosen simply by experimentation
anim.timeOffset = 290 * drand48()
// add the animation
square.layer.addAnimation(anim, forKey: "animate position along path")
}
}
func panning(pan: UIPanGestureRecognizer) {
if pan.state == .Began {
println("pan began")
self.view.bringSubviewToFront(pan.view!)
} else if pan.state == .Changed {
println("pan state changed")
var translation = pan.translationInView(self.view)
pan.view?.center = CGPointMake(pan.view!.center.x + translation.x, pan.view!.center.y + translation.y)
pan.setTranslation(CGPointZero, inView: self.view)
println("translation")
}
}
Two things jump out at me:
Your for loop is setting delegates for recognizer (which is not needed unless you're doing something particular which you haven't shared with us) and panGesture (which should be removed because panGesture is not a variable that you have set in this for loop and seems to be completely unrelated). Thus it would be:
for i in 0...50 {
let square = UIView()
square.frame = CGRect(x: 55, y: 300, width: 40, height: 40)
square.backgroundColor = UIColor.redColor()
self.view.addSubview(square)
let recognizer = UIPanGestureRecognizer(target: self, action: "panning:");
square.addGestureRecognizer(recognizer)
}
In panning you are instantiating a new panGesture: You definitely want to get rid of that. A gesture handler has no business creating new gestures. So get rid of the line that starts with let panGesture = ....
In your original question, you hadn't shared the animation you're doing. If you were animating using block-based animation, you have to specify the UIViewAnimationOptions.AllowUserInteraction options. Also, if you'd start dragging it around a view that is being animated, you'd also want to (a) stop the animation for that view; and (b) reset the frame in accordance with the presentationLayer.
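For reference, in the block-based case that option looks roughly like this (a sketch in the Swift of that era to match the question's code; `square` stands for the animated view):
// Rough sketch: .AllowUserInteraction keeps a view responsive to touches
// while a block-based animation runs on it.
func slideAcrossScreen(square: UIView) {
    UIView.animateWithDuration(5.0, delay: 0, options: [.AllowUserInteraction, .CurveLinear], animations: {
        square.center = CGPoint(x: 375, y: square.center.y)
    }, completion: nil)
}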
But, it's now clear that you're using CAKeyframeAnimation, in which case, there is no AllowUserInteraction mechanism. So, instead, you have to add the gesture recognizer to the superview, and then iterate through the frames of the squares (as represented by their presentation layer, i.e. the location mid-animation) and test to see if you get any hits.
It's up to you, but I find the pan gesture is a little slow to start recognizing the gesture (as it differentiates between a pan and a tap). In this case, you want something a little more responsive, methinks, so I might use a long press gesture recognizer with a minimumPressDuration of 0.
So, pulling that all together, you end up with something like:
var squares = [UIView]() // an array to keep track of all of the squares
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
srand48(Int(NSDate().timeIntervalSince1970))
// loop from 0 to 5
for i in 0...5 {
// create a square
let square = UIView()
square.frame = CGRect(x: 55, y: 300, width: 40, height: 40)
square.backgroundColor = UIColor.redColor()
self.view.addSubview(square)
squares.append(square)
// randomly create a value between 0.0 and 150.0
let randomYOffset = CGFloat( drand48() * 150)
// for every y-value on the bezier curve
// add our random y offset so that each individual animation
// will appear at a different y-position
let path = UIBezierPath()
path.moveToPoint(CGPoint(x: -16,y: 239 + randomYOffset))
path.addCurveToPoint(CGPoint(x: 375, y: 239 + randomYOffset), controlPoint1: CGPoint(x: 136, y: 373 + randomYOffset), controlPoint2: CGPoint(x: 178, y: 110 + randomYOffset))
// create the animation
let anim = CAKeyframeAnimation(keyPath: "position")
anim.path = path.CGPath
anim.rotationMode = kCAAnimationRotateAuto
anim.repeatCount = Float.infinity
//anim.duration = 5.0
// each square will take between 4.0 and 8.0 seconds
// to complete one animation loop
anim.duration = 4.0 + 3 * drand48()
// stagger each animation by a random value
// `290` was chosen simply by experimentation
anim.timeOffset = 290 * drand48()
// add the animation
square.layer.addAnimation(anim, forKey: "animate position along path")
}
let recognizer = UILongPressGestureRecognizer(target: self, action: "handleLongPress:");
recognizer.minimumPressDuration = 0
view.addGestureRecognizer(recognizer)
}
var viewToDrag: UIView! // which of the squares are we dragging
var viewToDragCenter: CGPoint! // what was the `center` of `viewToDrag` when we started to drag it
var originalLocation: CGPoint! // what was gesture.locationInView(gesture.view!) when we started dragging
func handleLongPress(gesture: UILongPressGestureRecognizer) {
let location = gesture.locationInView(gesture.view!)
if gesture.state == .Began {
for square in squares {
let presentationLayer = square.layer.presentationLayer() as CALayer
let frame = presentationLayer.frame
if CGRectContainsPoint(frame, location) {
viewToDrag = square
viewToDragCenter = CGPointMake(CGRectGetMidX(frame), CGRectGetMidY(frame))
viewToDrag.layer.removeAnimationForKey("animate position along path")
viewToDrag.center = viewToDragCenter
originalLocation = location
return
}
}
} else if gesture.state == .Changed {
if viewToDrag != nil {
var translation = CGPointMake(location.x - originalLocation.x, location.y - originalLocation.y)
viewToDrag.center = CGPointMake(viewToDragCenter.x + translation.x, viewToDragCenter.y + translation.y)
}
} else if gesture.state == .Ended {
viewToDrag = nil
}
}

iOS Floating Video Window like Youtube App

Does anyone know of any existing library, or any techniques on how to get the same effect as is found on the Youtube App.
The video can be "minimised" and hovers at the bottom of the screen - which can then be swiped to close or touched to re-maximised.
See:
Video Playing Normally: https://www.dropbox.com/s/o8c1ntfkkp4pc4q/2014-06-07%2001.19.20.png
Video Minimized: https://www.dropbox.com/s/w0syp3infu21g08/2014-06-07%2001.19.27.png
(Notice how the video is now in a small floating window on the bottom right of the screen).
Anyone have any idea how this was achieved, and if there are any existing tutorials or libraries that can be used to get this same effect?
It sounded fun, so I looked at YouTube. The video looks like it plays in a 16:9 box at the top, with a "see also" list below. When the user minimizes the video, the player drops to the lower right corner along with the "see also" view. At the same time, that "see also" view fades to transparent.
1) Set up the views like that and create outlets. Here's what it looks like in IB. (Note that the two containers are siblings.)
2) Give the video view a swipe up and swipe down gesture recognizer:
@interface ViewController ()
@property (weak, nonatomic) IBOutlet UIView *tallMpContainer;
@property (weak, nonatomic) IBOutlet UIView *mpContainer;
@end
@implementation ViewController
- (void)viewDidLoad
{
[super viewDidLoad];
UISwipeGestureRecognizer *swipeDown = [[UISwipeGestureRecognizer alloc] initWithTarget:self action:@selector(swipeDown:)];
UISwipeGestureRecognizer *swipeUp = [[UISwipeGestureRecognizer alloc] initWithTarget:self action:@selector(swipeUp:)];
swipeUp.direction = UISwipeGestureRecognizerDirectionUp;
swipeDown.direction = UISwipeGestureRecognizerDirectionDown;
[self.mpContainer addGestureRecognizer:swipeUp];
[self.mpContainer addGestureRecognizer:swipeDown];
}
- (void)swipeDown:(UIGestureRecognizer *)gr {
[self minimizeMp:YES animated:YES];
}
- (void)swipeUp:(UIGestureRecognizer *)gr {
[self minimizeMp:NO animated:YES];
}
3) And then a method to know about the current state, and change the current state.
- (BOOL)mpIsMinimized {
return self.tallMpContainer.frame.origin.y > 0;
}
- (void)minimizeMp:(BOOL)minimized animated:(BOOL)animated {
if ([self mpIsMinimized] == minimized) return;
CGRect tallContainerFrame, containerFrame;
CGFloat tallContainerAlpha;
if (minimized) {
CGFloat mpWidth = 160;
CGFloat mpHeight = 90; // 160:90 == 16:9
CGFloat x = 320-mpWidth;
CGFloat y = self.view.bounds.size.height - mpHeight;
tallContainerFrame = CGRectMake(x, y, 320, self.view.bounds.size.height);
containerFrame = CGRectMake(x, y, mpWidth, mpHeight);
tallContainerAlpha = 0.0;
} else {
tallContainerFrame = self.view.bounds;
containerFrame = CGRectMake(0, 0, 320, 180);
tallContainerAlpha = 1.0;
}
NSTimeInterval duration = (animated)? 0.5 : 0.0;
[UIView animateWithDuration:duration animations:^{
self.tallMpContainer.frame = tallContainerFrame;
self.mpContainer.frame = containerFrame;
self.tallMpContainer.alpha = tallContainerAlpha;
}];
}
I didn't add video to this project, but it should just drop in. Make the mpContainer the parent view of the MPMoviePlayerController's view and it should look pretty cool.
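For that last step, something along these lines should work (a sketch in Swift, since a Swift port of this answer follows below; `videoURL` is a placeholder for whatever you're playing, and MPMoviePlayerController is the player this answer assumes):
import UIKit
import MediaPlayer

// Sketch: embed the movie player's view in mpContainer so it shrinks and moves with it.
func embedPlayer(in mpContainer: UIView, videoURL: URL) -> MPMoviePlayerController {
    let player = MPMoviePlayerController(contentURL: videoURL)
    player.view.frame = mpContainer.bounds
    player.view.autoresizingMask = [.flexibleWidth, .flexibleHeight]
    mpContainer.addSubview(player.view)
    player.prepareToPlay()
    player.play()
    return player
}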
Use TFSwipeShrink and customize the code for your project.
Hope it helps.
Update: there is a new framework, FWDraggableSwipePlayer, for dragging a UIView like the YouTube app does.
Hope it helps.
This is a Swift 3 version of the answer @danh provided earlier.
https://stackoverflow.com/a/24107949/1211470
import UIKit
class ViewController: UIViewController {
@IBOutlet weak var tallMpContainer: UIView!
@IBOutlet weak var mpContainer: UIView!
var swipeDown: UISwipeGestureRecognizer?
var swipeUp: UISwipeGestureRecognizer?
override func viewDidLoad() {
super.viewDidLoad()
swipeDown = UISwipeGestureRecognizer(target: self, action: #selector(swipeDownAction))
swipeUp = UISwipeGestureRecognizer(target: self, action: #selector(swipeUpAction))
swipeDown?.direction = .down
swipeUp?.direction = .up
self.mpContainer.addGestureRecognizer(swipeDown!)
self.mpContainer.addGestureRecognizer(swipeUp!)
}
@objc func swipeDownAction() {
minimizeWindow(minimized: true, animated: true)
}
@objc func swipeUpAction() {
minimizeWindow(minimized: false, animated: true)
}
func isMinimized() -> Bool {
return CGFloat((self.tallMpContainer?.frame.origin.y)!) > CGFloat(20)
}
func minimizeWindow(minimized: Bool, animated: Bool) {
if isMinimized() == minimized {
return
}
var tallContainerFrame: CGRect
var containerFrame: CGRect
var tallContainerAlpha: CGFloat
if minimized == true {
let mpWidth: CGFloat = 160
let mpHeight: CGFloat = 90
let x: CGFloat = 320-mpWidth
let y: CGFloat = self.view.bounds.size.height - mpHeight;
tallContainerFrame = CGRect(x: x, y: y, width: 320, height: self.view.bounds.size.height)
containerFrame = CGRect(x: x, y: y, width: mpWidth, height: mpHeight)
tallContainerAlpha = 0.0
} else {
tallContainerFrame = self.view.bounds
containerFrame = CGRect(x: 0, y: 0, width: 320, height: 180)
tallContainerAlpha = 1.0
}
let duration: TimeInterval = (animated) ? 0.5 : 0.0
UIView.animate(withDuration: duration, animations: {
self.tallMpContainer.frame = tallContainerFrame
self.mpContainer.frame = containerFrame
self.tallMpContainer.alpha = tallContainerAlpha
})
}
}

Can UIPinchGestureRecognizer and UIPanGestureRecognizer Be Merged?

I am struggling a bit trying to figure out if it is possible to create a single combined gesture recognizer that combines UIPinchGestureRecognizer with UIPanGestureRecognizer.
I am using pan for view translation and pinch for view scaling. I am doing incremental matrix concatenation to derive a resultant final transformation matrix that is applied to the view. This matrix has both scale and translation. Using separate gesture recognizers leads to a jittery movement/scaling. Not what I want. Thus, I want to handle concatenation of scale and translation once within a single gesture. Can someone please shed some light on how to do this?
6/14/14: Updated Sample Code for iOS 7+ with ARC.
The UIGestureRecognizers can work together; you just need to make sure you don't trash the current view's transform matrix. Use the CGAffineTransformScale method and related methods that take a transform as input, rather than creating it from scratch (unless you maintain the current rotation, scale, or translation yourself).
Download Xcode Project
Sample UIPinchGesture project on Github.
Note: iOS 7 behaves weirdly with UIViews in IB that have pan/pinch/rotate gestures applied. iOS 8 fixes it, but my workaround is to add all views in code, like in this code example.
Demo Video
Add them to a view and conform to the UIGestureRecognizerDelegate protocol
@interface ViewController () <UIGestureRecognizerDelegate>
@end
@implementation ViewController
- (void)viewDidLoad
{
[super viewDidLoad];
UIView *blueView = [[UIView alloc] initWithFrame:CGRectMake(100, 100, 150, 150)];
blueView.backgroundColor = [UIColor blueColor];
[self.view addSubview:blueView];
[self addMovementGesturesToView:blueView];
// UIImageView's and UILabel's don't have userInteractionEnabled by default!
UIImageView *imageView = [[UIImageView alloc] initWithImage:[UIImage imageNamed:@"BombDodge.png"]]; // Any image in Xcode project
imageView.center = CGPointMake(100, 250);
[imageView sizeToFit];
[self.view addSubview:imageView];
[self addMovementGesturesToView:imageView];
// Note: Changing the font size would be crisper than zooming a font!
UILabel *label = [[UILabel alloc] init];
label.text = @"Hello Gestures!";
label.font = [UIFont systemFontOfSize:30];
label.textColor = [UIColor blackColor];
[label sizeToFit];
label.center = CGPointMake(100, 400);
[self.view addSubview:label];
[self addMovementGesturesToView:label];
}
- (void)addMovementGesturesToView:(UIView *)view {
view.userInteractionEnabled = YES; // Enable user interaction
UIPanGestureRecognizer *panGesture = [[UIPanGestureRecognizer alloc] initWithTarget:self action:@selector(handlePanGesture:)];
panGesture.delegate = self;
[view addGestureRecognizer:panGesture];
UIPinchGestureRecognizer *pinchGesture = [[UIPinchGestureRecognizer alloc] initWithTarget:self action:@selector(handlePinchGesture:)];
pinchGesture.delegate = self;
[view addGestureRecognizer:pinchGesture];
}
Implement gesture methods
- (void)handlePanGesture:(UIPanGestureRecognizer *)panGesture {
CGPoint translation = [panGesture translationInView:panGesture.view.superview];
if (UIGestureRecognizerStateBegan == panGesture.state ||UIGestureRecognizerStateChanged == panGesture.state) {
panGesture.view.center = CGPointMake(panGesture.view.center.x + translation.x,
panGesture.view.center.y + translation.y);
// Reset translation, so we can get translation delta's (i.e. change in translation)
[panGesture setTranslation:CGPointZero inView:self.view];
}
// Don't need any logic for ended/failed/canceled states
}
- (void)handlePinchGesture:(UIPinchGestureRecognizer *)pinchGesture {
if (UIGestureRecognizerStateBegan == pinchGesture.state ||
UIGestureRecognizerStateChanged == pinchGesture.state) {
// Use the x or y scale, they should be the same for typical zooming (non-skewing)
float currentScale = [[pinchGesture.view.layer valueForKeyPath:@"transform.scale.x"] floatValue];
// Variables to adjust the max/min values of zoom
float minScale = 1.0;
float maxScale = 2.0;
float zoomSpeed = .5;
float deltaScale = pinchGesture.scale;
// You need to translate the zoom to 0 (origin) so that you
// can multiply a speed factor and then translate back to "zoomSpace" around 1
deltaScale = ((deltaScale - 1) * zoomSpeed) + 1;
// Limit to min/max size (i.e maxScale = 2, current scale = 2, 2/2 = 1.0)
// A deltaScale is ~0.99 for decreasing or ~1.01 for increasing
// A deltaScale of 1.0 will maintain the zoom size
deltaScale = MIN(deltaScale, maxScale / currentScale);
deltaScale = MAX(deltaScale, minScale / currentScale);
CGAffineTransform zoomTransform = CGAffineTransformScale(pinchGesture.view.transform, deltaScale, deltaScale);
pinchGesture.view.transform = zoomTransform;
// Reset to 1 for scale delta's
// Note: not 0, or we won't see a size: 0 * width = 0
pinchGesture.scale = 1;
}
}
- (BOOL)gestureRecognizer:(UIGestureRecognizer *)gestureRecognizer shouldRecognizeSimultaneouslyWithGestureRecognizer:(UIGestureRecognizer *)otherGestureRecognizer {
return YES; // Works for most use cases of pinch + zoom + pan
}
Resources
Xcode Gesture Sample Project
Apple's Gestures Guide
If anyone is interested in a Swift implementation of this using Metal to do the rendering, I have a project available here.
Swift
Many thanks to Paul! Here is his Swift version:
import UIKit
class ViewController: UIViewController, UIGestureRecognizerDelegate {
var editorView: EditorView!
override func viewDidLoad() {
super.viewDidLoad()
let blueView = UIView(frame: .init(x: 100, y: 100, width: 300, height: 300))
view.addSubview(blueView)
blueView.backgroundColor = .blue
addMovementGesturesToView(blueView)
}
func addMovementGesturesToView(_ view: UIView) {
view.isUserInteractionEnabled = true
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(handlePanGesture(_:)))
panGesture.delegate = self
view.addGestureRecognizer(panGesture)
let pinchGesture = UIPinchGestureRecognizer(target: self, action: #selector(handlePinchGesture(_:)))
pinchGesture.delegate = self
view.addGestureRecognizer(pinchGesture)
}
@objc private func handlePanGesture(_ panGesture: UIPanGestureRecognizer) {
guard let panView = panGesture.view else { return }
let translation = panGesture.translation(in: panView.superview)
if panGesture.state == .began || panGesture.state == .changed {
panGesture.view?.center = CGPoint(x: panView.center.x + translation.x, y: panView.center.y + translation.y)
// Reset translation, so we can get translation delta's (i.e. change in translation)
panGesture.setTranslation(.zero, in: self.view)
}
// Don't need any logic for ended/failed/canceled states
}
@objc private func handlePinchGesture(_ pinchGesture: UIPinchGestureRecognizer) {
guard let pinchView = pinchGesture.view else { return }
if pinchGesture.state == .began || pinchGesture.state == .changed {
let currentScale = scale(for: pinchView.transform)
// Variables to adjust the max/min values of zoom
let minScale: CGFloat = 0.2
let maxScale: CGFloat = 3
let zoomSpeed: CGFloat = 0.8
var deltaScale = pinchGesture.scale
// You need to translate the zoom to 0 (origin) so that you
// can multiply a speed factor and then translate back to "zoomSpace" around 1
deltaScale = ((deltaScale - 1) * zoomSpeed) + 1
// Limit to min/max size (i.e maxScale = 2, current scale = 2, 2/2 = 1.0)
// A deltaScale is ~0.99 for decreasing or ~1.01 for increasing
// A deltaScale of 1.0 will maintain the zoom size
deltaScale = min(deltaScale, maxScale / currentScale)
deltaScale = max(deltaScale, minScale / currentScale)
let zoomTransform = pinchView.transform.scaledBy(x: deltaScale, y: deltaScale)
pinchView.transform = zoomTransform
// Reset to 1 for scale delta's
// Note: not 0, or we won't see a size: 0 * width = 0
pinchGesture.scale = 1
}
}
func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIGestureRecognizer) -> Bool {
return true
}
private func scale(for transform: CGAffineTransform) -> CGFloat {
return sqrt(CGFloat(transform.a * transform.a + transform.c * transform.c))
}
}
Demo (on Simulator):
