Resize and rotate an image annotation added to a PDF - iOS

Using this link I have added a signature image annotation to a PDF file,
but I could not find any guide on how to rotate and resize the image annotation using the buttons added on top of the annotation image, as shown in the image.
What I want to do is:
scale/resize the signature image (make it smaller or larger with a resize button)
rotate the signature image
For pinch to zoom I am adding a pinch gesture to the PDFView, but that gesture zooms the whole PDF in and out. I tried to fix it with the code below, but it did not work.
@objc func scale(sender: UIPinchGestureRecognizer) {
    print("----------Scale----------")
    let touchLocation = sender.location(in: pdfContainerView)
    guard let page = pdfContainerView.page(for: touchLocation, nearest: true) else {
        return
    }
    let locationOnPage = pdfContainerView.convert(touchLocation, to: page)
    switch sender.state {
    case .began:
        guard let annotation = page.annotation(at: locationOnPage) else {
            return
        }
        if annotation.isKind(of: ImageStampAnnotation.self) {
            currentlySelectedAnnotation = annotation
            // to disable pinch gesture for pdfview but it is not working
            pdfContainerView.minScaleFactor = pdfContainerView.scaleFactor
            pdfContainerView.maxScaleFactor = pdfContainerView.scaleFactor
        }
    case .changed, .ended:
        guard let annotation = currentlySelectedAnnotation else {
            return
        }
        let initialBounds = annotation.bounds
        // scale annotation
    case .cancelled:
        break
    default:
        break
    }
}
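For reference, the // scale annotation placeholder above could be filled in roughly like this (a sketch only; it assumes the annotation's bounds are stored in an initialAnnotationBounds property when the gesture begins, and scales them uniformly around their centre):
case .changed, .ended:
    guard let annotation = currentlySelectedAnnotation else { return }
    // Scale the bounds captured in .began by the pinch factor, keeping the centre fixed.
    let newWidth = initialAnnotationBounds.width * sender.scale
    let newHeight = initialAnnotationBounds.height * sender.scale
    annotation.bounds = CGRect(x: initialAnnotationBounds.midX - newWidth / 2,
                               y: initialAnnotationBounds.midY - newHeight / 2,
                               width: newWidth,
                               height: newHeight)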
Thanks in advance!!

I tried to implement rotation, zoom, and pan on the PDFView.
I created a new view above the PDFAnnotation and then moved, rotated, and scaled the annotation through that overlay view, as shown above.
About the PDFAnnotation:
import UIKit
import PDFKit

class PDFImageAnnotation: PDFAnnotation {
    let image: UIImage
    let originalBounds: CGRect

    /// 0 - 360
    var angle: CGFloat = 0 {
        didSet {
            // reload annotation
            shouldDisplay = true
        }
    }

    /// scale annotation
    var scale: CGFloat = 1 {
        didSet {
            // Scale relative to the original size
            let width = originalBounds.width * scale
            let height = originalBounds.height * scale
            // move the origin so the annotation stays centred
            let x = bounds.origin.x - (width - bounds.width) / 2
            let y = bounds.origin.y - (height - bounds.height) / 2
            print("new ---- \(CGRect(x: x, y: y, width: width, height: height))")
            // Setting the bounds will automatically re-render
            bounds = CGRect(x: x, y: y, width: width, height: height)
        }
    }

    /// move center point
    var center: CGPoint = .zero {
        didSet {
            let x = center.x - bounds.width / 2.0
            let y = center.y - bounds.height / 2.0
            // Setting the bounds will automatically re-render
            bounds = CGRect(origin: CGPoint(x: x, y: y), size: bounds.size)
        }
    }

    public init(bounds: CGRect, image: UIImage) {
        self.image = image
        originalBounds = bounds
        super.init(bounds: bounds, forType: .ink, withProperties: nil)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func draw(with box: PDFDisplayBox, in context: CGContext) {
        super.draw(with: box, in: context)
        print("PDFImageAnnotation bounds - \(bounds)")
        guard let page = page else {
            return
        }
        UIGraphicsPushContext(context)
        context.saveGState()

        // Rotate the annotation around its centre.
        // The origin of the annotation is always at the initial position.
        let translateX = bounds.width / 2 + bounds.origin.x
        let translateY = bounds.height / 2 + bounds.origin.y
        // The page has its own rotation angle
        let newAngle = angle + CGFloat(page.rotation)
        context.translateBy(x: translateX, y: translateY)
        context.rotate(by: newAngle * (CGFloat.pi / 180.0))
        context.translateBy(x: -translateX, y: -translateY)

        // draw image
        if let cgImage = image.cgImage {
            context.draw(cgImage, in: bounds)
        }

        context.restoreGState()
        UIGraphicsPopContext()
    }
}
use:
extension ViewController: PDFSignAnnotationViewDelegate {
    func signAnnotationView(_ view: PDFSignAnnotationView, didMove point: CGPoint) {
        guard let page = pdfContainerView.currentPage,
              let imageAnnotation = page.annotations.filter({ $0.userName == view.identity }).first as? PDFImageAnnotation else {
            return
        }
        let locationOnPage = self.pdfContainerView.convert(point, to: page)
        imageAnnotation.center = locationOnPage
    }

    func signAnnotationView(_ view: PDFSignAnnotationView, didScale scale: CGFloat) {
        guard let page = pdfContainerView.currentPage,
              let imageAnnotation = page.annotations.filter({ $0.userName == view.identity }).first as? PDFImageAnnotation else {
            return
        }
        imageAnnotation.scale = scale
    }

    func signAnnotationView(_ view: PDFSignAnnotationView, didRotate angle: CGFloat) {
        guard let page = pdfContainerView.currentPage,
              let imageAnnotation = page.annotations.filter({ $0.userName == view.identity }).first as? PDFImageAnnotation else {
            return
        }
        print("didRotate - \(angle)")
        imageAnnotation.angle = -angle
    }

    func signAnnotationView(_ view: PDFSignAnnotationView, close identity: String) {
        guard let page = pdfContainerView.currentPage else {
            return
        }
        guard let annotation = page.annotations.filter({ $0.userName == identity }).first else {
            return
        }
        page.removeAnnotation(annotation)
    }
}
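Not shown in the answer is how the annotation and its overlay get paired up. A minimal sketch of how they might be created together (PDFSignAnnotationView's initializer and delegate property are assumed; only the userName/identity pairing is taken from the delegate methods above):
// Sketch: create the annotation and the overlay view that drives it.
// PDFSignAnnotationView(frame:identity:) and its delegate property are assumed here.
func addSignature(_ image: UIImage, to page: PDFPage, at bounds: CGRect) {
    let identity = UUID().uuidString

    let annotation = PDFImageAnnotation(bounds: bounds, image: image)
    annotation.userName = identity          // the delegate methods look the annotation up by this
    page.addAnnotation(annotation)

    // Place the overlay where the annotation appears in the PDFView's coordinate space.
    let overlayFrame = pdfContainerView.convert(bounds, from: page)
    let overlay = PDFSignAnnotationView(frame: overlayFrame, identity: identity)
    overlay.delegate = self
    pdfContainerView.addSubview(overlay)
}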
TODO:
Move and zoom the page without sending the changes to the overlay view above.
Reference: https://medium.com/@rajejones/add-a-signature-to-pdf-using-pdfkit-with-swift-7f13f7faad3e
Demo: https://github.com/roMummy/PDFSignature/tree/master/PDFSignature
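One way to approach the TODO above (a sketch, not taken from the linked demo): instead of pushing page changes through the overlay, convert the annotation's bounds back into view coordinates whenever the PDFView zooms and move the overlay to match.
// Sketch: keep an overlay aligned with its annotation when the PDFView's scale changes.
// `overlay` and `imageAnnotation` stand for one of the pairs created earlier; scrolling
// could be handled the same way from the PDFView's internal scroll view.
NotificationCenter.default.addObserver(forName: .PDFViewScaleChanged,
                                       object: pdfContainerView,
                                       queue: .main) { [weak self] _ in
    guard let self = self, let page = imageAnnotation.page else { return }
    overlay.frame = self.pdfContainerView.convert(imageAnnotation.bounds, from: page)
}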

Related

Swipeable CIFilter over video

I am currently trying to implement something similar to Instagram's story feature, where you take a picture or a video and, by swiping left or right, you change the current filter over the content. (Here is an example of what I managed to do in my app for images: https://imgur.com/a/pYKrPkA)
As you can see in the example, I got it done for images, but now my problem is that I am trying to make it work for videos as well, and I am a bit lost as to where to start.
final class Filter: NSObject {
var isEnabled: Bool = true
var overlayImage: CIImage?
var ciFilter: CIFilter?
init(ciFilter: CIFilter?) {
self.ciFilter = ciFilter
super.init()
}
/// Empty filter for the original photo
static func emptyFilter() -> Filter {
return Filter(ciFilter: nil)
}
func imageByProcessingImage(_ image: CIImage, at time: CFTimeInterval) -> CIImage? {
guard isEnabled else { return image }
var image = image
if let overlayImage = overlayImage {
image = overlayImage.composited(over: image)
}
guard let ciFilter = ciFilter else {
return image
}
ciFilter.setValue(image, forKey: kCIInputImageKey)
return ciFilter.value(forKey: kCIOutputImageKey) as? CIImage
}
}
class StoriesImageView: UIView {
private var metalView: MTKView?
private var ciImage: CIImage?
private var preferredCIImageTransform: CGAffineTransform?
private let device = MTLCreateSystemDefaultDevice()
private var commandQueue: MTLCommandQueue?
private var context: CIContext?
override func layoutSubviews() {
super.layoutSubviews()
metalView?.frame = bounds
}
override func setNeedsDisplay() {
super.setNeedsDisplay()
metalView?.setNeedsDisplay()
}
func setImage(with image: UIImage) {
preferredCIImageTransform = preferredCIImageTransform(from: image)
if let cgImage = image.cgImage {
ciImage = CIImage(cgImage: cgImage)
loadContextIfNeeded()
}
setNeedsDisplay()
}
/// Return the image fitted to 1080x1920.
func renderedUIImage() -> UIImage? {
return renderedUIImage(in: CGRect(origin: .zero, size: CGSize(width: 1080, height: 1920)))
}
/// Returns CIImage in fitted to main screen bounds.
func renderedCIIImage() -> CIImage? {
return renderedCIImage(in: CGRect(rect: bounds, contentScale: UIScreen.main.scale))
}
func renderedUIImage(in rect: CGRect) -> UIImage? {
if let image = renderedCIImage(in: rect), let context = context {
if let imageRef = context.createCGImage(image, from: image.extent) {
return UIImage(cgImage: imageRef)
}
}
return nil
}
func renderedCIImage(in rect: CGRect) -> CIImage? {
if var image = ciImage, let transform = preferredCIImageTransform {
image = image.transformed(by: transform)
return scaleAndResize(image, for: rect)
}
return nil
}
private func cleanupContext() {
metalView?.removeFromSuperview()
metalView?.releaseDrawables()
metalView = nil
}
private func loadContextIfNeeded() {
setContext()
}
private func setContext() {
let mView = MTKView(frame: bounds, device: device)
mView.clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 0)
mView.framebufferOnly = false
mView.enableSetNeedsDisplay = true
mView.contentScaleFactor = contentScaleFactor
mView.delegate = self
metalView = mView
commandQueue = device?.makeCommandQueue()
context = CIContext(mtlDevice: device!)
insertSubview(metalView!, at: 0)
}
private func scaleAndResize(_ image: CIImage, for rect: CGRect) -> CIImage {
let imageSize = image.extent.size
let horizontalScale = rect.size.width / imageSize.width
let verticalScale = rect.size.height / imageSize.height
let scale = min(horizontalScale, verticalScale)
return image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
}
private func preferredCIImageTransform(from image: UIImage) -> CGAffineTransform {
if image.imageOrientation == .up {
return .identity
}
var transform: CGAffineTransform = .identity
switch image.imageOrientation {
case .down, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: image.size.height)
transform = transform.rotated(by: .pi)
case .left, .leftMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.rotated(by: .pi / 2)
case .right, .rightMirrored:
transform = transform.translatedBy(x: 0, y: image.size.height)
transform = transform.rotated(by: .pi / -2)
case .up, .upMirrored: break
@unknown default: fatalError("Unknown image orientation")
}
switch image.imageOrientation {
case .upMirrored, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .leftMirrored, .rightMirrored:
transform = transform.translatedBy(x: image.size.height, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .up, .down, .left, .right: break
@unknown default: fatalError("Unknown image orientation")
}
return transform
}
}
extension StoriesImageView: MTKViewDelegate {
func draw(in view: MTKView) {
autoreleasepool {
let rect = CGRect(rect: view.bounds, contentScale: UIScreen.main.scale)
if let image = renderedCIImage(in: rect) {
let commandBuffer = commandQueue?.makeCommandBuffer()
guard let drawable = view.currentDrawable else {
return
}
let heightDifference = (view.drawableSize.height - image.extent.size.height) / 2
let destination = CIRenderDestination(width: Int(view.drawableSize.width),
height: Int(view.drawableSize.height - heightDifference),
pixelFormat: view.colorPixelFormat,
commandBuffer: commandBuffer,
mtlTextureProvider: { () -> MTLTexture in
return drawable.texture
})
_ = try? context?.startTask(toRender: image, to: destination)
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
}
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {}
}
final class StoriesSwipeableImageView: StoriesImageView {
private let scrollView: UIScrollView = UIScrollView()
private let preprocessingFilter: Filter? = nil
var isRefreshingAutomaticallyWhenScrolling: Bool = true
var filters: [Filter]? {
didSet {
updateScrollViewContentSize()
updateCurrentSelected(notify: true)
}
}
var selectedFilter: Filter? {
didSet {
if selectedFilter != oldValue {
setNeedsLayout()
}
}
}
override init(frame: CGRect) {
super.init(frame: frame)
setup()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
setup()
}
override func layoutSubviews() {
super.layoutSubviews()
scrollView.frame = bounds
updateScrollViewContentSize()
}
private func setup() {
scrollView.delegate = self
scrollView.isPagingEnabled = true
scrollView.showsHorizontalScrollIndicator = false
scrollView.showsVerticalScrollIndicator = false
scrollView.bounces = true
scrollView.alwaysBounceVertical = true
scrollView.alwaysBounceHorizontal = true
scrollView.backgroundColor = .clear
addSubview(scrollView)
}
private func updateScrollViewContentSize() {
let filterCount = filters?.count ?? 0
scrollView.contentSize = CGSize(width: filterCount * Int(frame.size.width) * 3,
height: Int(frame.size.height))
if let selectedFilter = selectedFilter {
scroll(to: selectedFilter, animated: false)
}
}
private func scroll(to filter: Filter, animated: Bool) {
if let index = filters?.firstIndex(where: { $0 === filter }) {
let contentOffset = CGPoint(x: scrollView.contentSize.width / 3 + scrollView.frame.size.width * CGFloat(index), y: 0)
scrollView.setContentOffset(contentOffset, animated: animated)
updateCurrentSelected(notify: false)
} else {
fatalError("Filter is not available in filters collection")
}
}
private func updateCurrentSelected(notify: Bool) {
guard frame.size.width != 0 else { return }
let filterCount = filters?.count ?? 0
let selectedIndex = Int(scrollView.contentOffset.x + scrollView.frame.size.width / 2) / Int(scrollView.frame.size.width) % filterCount
var newFilterGroup: Filter?
if selectedIndex >= 0 && selectedIndex < filterCount {
newFilterGroup = filters?[selectedIndex]
} else {
fatalError("Invalid contentOffset")
}
if selectedFilter != newFilterGroup {
selectedFilter = newFilterGroup
if notify {
// Notify delegate?
}
}
}
override func renderedCIImage(in rect: CGRect) -> CIImage? {
guard var image = super.renderedCIImage(in: rect) else {
print("Failed to render image")
return nil
}
let timeinterval: CFTimeInterval = 0
if let preprocessingFilter = self.preprocessingFilter {
image = preprocessingFilter.imageByProcessingImage(image, at: timeinterval)!
}
let extent = image.extent
let contentSize = scrollView.bounds.size
if contentSize.width == 0 {
return image
}
let filtersCount = filters?.count ?? 0
if filtersCount == 0 {
return image
}
let ratio = scrollView.contentOffset.x / contentSize.width
var index = Int(ratio)
let upIndex = Int(ceil(ratio))
let remaningRatio = ratio - CGFloat(index)
var xImage = extent.size.width * -remaningRatio
var outputImage: CIImage? = CIImage(color: CIColor(red: 0, green: 0, blue: 0))
while index <= upIndex {
let currentIndex = index % filtersCount
let filter = filters?[currentIndex]
var filteredImage = filter?.imageByProcessingImage(image, at: timeinterval)
filteredImage = filteredImage?.cropped(to:
CGRect(x: extent.origin.x + xImage,
y: extent.origin.y,
width: extent.size.width,
height: extent.size.height)
)
guard let output = outputImage else { return nil }
outputImage = filteredImage?.composited(over: output)
xImage += extent.size.width
index += 1
}
outputImage = outputImage?.cropped(to: extent)
return outputImage
}
}
extension StoriesSwipeableImageView: UIScrollViewDelegate {
func scrollViewDidScroll(_ scrollView: UIScrollView) {
let width = scrollView.frame.size.width
let contentOffsetX = scrollView.contentOffset.x
let contentSizeWidth = scrollView.contentSize.width
let normalWidth = CGFloat(filters?.count ?? 0) * width
if width > 0 && contentSizeWidth > 0 {
if contentOffsetX <= 0 {
scrollView.contentOffset = CGPoint(x: contentOffsetX + normalWidth, y: scrollView.contentOffset.y)
} else if contentOffsetX + width >= contentSizeWidth {
scrollView.contentOffset = CGPoint(x: contentOffsetX - normalWidth, y: scrollView.contentOffset.y)
}
}
if isRefreshingAutomaticallyWhenScrolling {
setNeedsDisplay()
}
}
func scrollViewDidScrollToTop(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndScrollingAnimation(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDragging(_ scrollView: UIScrollView, willDecelerate decelerate: Bool) {
if !decelerate {
updateCurrentSelected(notify: true)
}
}
}
These three classes do the magic for the image part. Does anyone have a suggestion or a starting point for this? I tried looking over https://github.com/rFlex/SCRecorder, but I get a bit lost in the Objective-C.
In iOS 9 / OS X 10.11 / tvOS, there's a convenience method for applying CIFilters to video. It works on an AVVideoComposition, so you can use it both for playback and for file-to-file import/export. See AVVideoComposition.init(asset:applyingCIFiltersWithHandler:) for the method docs.
There's an example in Apple's Core Image Programming Guide, too:
let filter = CIFilter(name: "CIGaussianBlur")!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
    // Clamp to avoid blurring transparent pixels at the image edges
    let source = request.sourceImage.clampingToExtent()
    filter.setValue(source, forKey: kCIInputImageKey)
    // Vary filter parameters based on video timing
    let seconds = CMTimeGetSeconds(request.compositionTime)
    filter.setValue(seconds * 10.0, forKey: kCIInputRadiusKey)
    // Crop the blurred output to the bounds of the original image
    let output = filter.outputImage!.cropping(to: request.sourceImage.extent)
    // Provide the filter output to the composition
    request.finish(with: output, context: nil)
})
That part sets up the composition. After you've done that, you can either play it by assigning it to an AVPlayer or write it to a file with AVAssetExportSession. Since you're after the latter, here's an example of that:
let export = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset1920x1080)
export.outputFileType = AVFileTypeQuickTimeMovie
export.outputURL = outURL
export.videoComposition = composition
export.exportAsynchronouslyWithCompletionHandler(/*...*/)
There's a bit more about this in the WWDC15 session on Core Image, starting around 20 minutes in.
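For the playback route mentioned above (rather than exporting), a minimal sketch, assuming the same asset and composition as in the snippets above:
import AVFoundation

// Play the filtered composition directly instead of writing it to a file.
let playerItem = AVPlayerItem(asset: asset)
playerItem.videoComposition = composition
let player = AVPlayer(playerItem: playerItem)
player.play()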

UICollectionViewFlowLayout edge cells disappear when translation transform is applied

I'm using a custom UICollectionViewFlowLayout to make cells zoom and fade as they reach the top. To do this, I'm applying an alpha and transform to the layout attributes. Here's my code (link to full demo repo):
class EdgeZoomLayout: UICollectionViewFlowLayout {
override func layoutAttributesForElements(in rect: CGRect) -> [UICollectionViewLayoutAttributes]? {
guard let collectionView = collectionView else { return nil }
let rectAttributes = super.layoutAttributesForElements(in: rect)!.map { $0.copy() as! UICollectionViewLayoutAttributes }
let visibleRect = CGRect(origin: collectionView.contentOffset, size: collectionView.frame.size)
for attributes in rectAttributes where attributes.frame.intersects(visibleRect) {
let positionInFrameY = attributes.center.y - visibleRect.origin.y /// y origin of rectangle
let cutoff = CGFloat(30)
if positionInFrameY <= cutoff {
let translation = cutoff - positionInFrameY /// distance from the cutoff, 0 if exactly on cutoff
let alpha = 1 - (translation / 100)
let scale = 1 - (translation / 1000)
attributes.alpha = alpha
attributes.zIndex = Int(alpha >= 1 ? 1 : 0) /// if alpha is 1, keep on the top
attributes.transform = CGAffineTransform(scaleX: scale, y: scale).translatedBy(x: 0, y: translation)
} else {
attributes.zIndex = 1 /// keep on top if not getting zoomed
}
}
return rectAttributes
}
/// boilerplate code
override init() { super.init() }
required init?(coder aDecoder: NSCoder) { fatalError("init(coder:) has not been implemented") }
override func shouldInvalidateLayout(forBoundsChange newBounds: CGRect) -> Bool { return true }
override func invalidationContext(forBoundsChange newBounds: CGRect) -> UICollectionViewLayoutInvalidationContext {
let context = super.invalidationContext(forBoundsChange: newBounds) as! UICollectionViewFlowLayoutInvalidationContext
context.invalidateFlowLayoutDelegateMetrics = newBounds.size != collectionView?.bounds.size
return context
}
}
This is almost perfect, but the topmost cell disappears suddenly when it gets close to the edge:
How can I make the cell zoom and fade without vanishing like that? I think it might be because the collection view recycled the cell... but then, how do I keep it?
Either remove this clause:
where attributes.frame.intersects(visibleRect)
or give visibleRect some extra room, so attributes that have been translated past the edge are still returned instead of being filtered out, like this:
for attributes in rectAttributes where attributes.frame.intersects(visibleRect.insetBy(dx: 0, dy: -15)) {

Rotate collectionView in circle following user direction

I am trying to create a collectionView with a circular layout, and I want the collectionView to rotate in a circle as the user swipes a finger around the screen in either direction. I found the circle layout for the collectionView; here is what I have done so far.
To rotate this collectionView I have written the following code.
Add the gesture to the collectionView:
panGesture = UIPanGestureRecognizer(target: self, action: #selector(self.gestureReader(_:)))
panGesture.cancelsTouchesInView = false
self.collectionView.addGestureRecognizer(panGesture)
Here are the gestureReader and animation methods:
@objc private func gestureReader(_ gesture: UIPanGestureRecognizer) {
    var startLocation = CGPoint.zero
    var endLocation = CGPoint.zero
    let currentLocation = gesture.location(in: self.collectionView)
    if gesture.state == .began {
        startLocation = currentLocation
    }
    if gesture.state == .ended {
        endLocation = currentLocation
        self.startRotatingView(start: startLocation, end: endLocation)
    }
}
private func startRotatingView(start: CGPoint, end: CGPoint) {
    let dx = end.x - start.x
    let dy = end.y - start.y
    let distance = abs(sqrt(dx * dx + dy * dy))
    print(distance)
    if start.x > end.x {
        if start.y > end.y {
            // positive value of pi
            self.circleAnimation(-distance)
        } else {
            // negative value of pi
            self.circleAnimation(distance)
        }
    } else {
        if start.y > end.y {
            // positive value of pi
            self.circleAnimation(-distance)
        } else {
            // negative value of pi
            self.circleAnimation(distance)
        }
    }
}
private func circleAnimation(_ angle: CGFloat) {
    UIView.animate(withDuration: 0.7, delay: 0, options: .curveLinear, animations: {
        self.collectionView.transform = CGAffineTransform.identity
        self.collectionView.transform = CGAffineTransform(rotationAngle: angle)
    }) { _ in
        //
    }
}
First, the animation is not working properly, and second, when the collectionView gets rotated, this is what I get:
Question 1: What else do I need to add to make this animation smooth and follow the user's finger?
Question 2: I want the collectionView cells to stay oriented as they were before the animation. How can I achieve this?
Thanks in advance
Here is an example. The decoration view S1View is a subclass of UICollectionViewCell registered for the kind "background".
The code is not hard to understand, but it is tedious to put together. How to control the animator is another story.
class TestCollectionViewLayout: UICollectionViewLayout {
lazy var dataSource : UICollectionViewDataSource? = {
self.collectionView?.dataSource
}()
var layouts : [IndexPath: UICollectionViewLayoutAttributes?] = [:]
var itemNumber : Int {
return dataSource!.collectionView(collectionView!, numberOfItemsInSection: 0)
}
override func layoutAttributesForElements(in rect: CGRect) -> [UICollectionViewLayoutAttributes]?{
var itemArray = (0..<itemNumber).map{ self.layoutAttributesForItem(at: IndexPath.init(row: $0, section: 0))!}
itemArray.append(self.layoutAttributesForDecorationView(ofKind:"background"
, at: IndexPath.init(row: 0, section: 0)))
return itemArray
}
override var collectionViewContentSize: CGSize { get{
return self.collectionView?.frame.size ?? CGSize.zero
}
}
lazy var dynamicAnimator = {UIDynamicAnimator(collectionViewLayout: self)}()
private func updateCurrentLayoutAttributesForItem(at indexPath: IndexPath, current: UICollectionViewLayoutAttributes?) -> UICollectionViewLayoutAttributes?{
return current
}
private func initLayoutAttributesForItem(at indexPath: IndexPath) -> UICollectionViewLayoutAttributes?{
let layoutAttributes = UICollectionViewLayoutAttributes(forCellWith: indexPath)
let center = (collectionView?.center)!
let angle = (CGFloat(indexPath.row) / CGFloat(itemNumber) * CGFloat.pi * 2)
layoutAttributes.center = CGPoint.init(x: center.x + cos(angle) * CGFloat(radius) , y: center.y + sin(angle) * CGFloat(radius) )
layoutAttributes.bounds = CGRect.init(x: 0, y: 0, width: 100, height: 100 )
if let decorator = self.decorator {
let itemBehavior =
UIAttachmentBehavior.pinAttachment(with: layoutAttributes, attachedTo: decorator, attachmentAnchor: layoutAttributes.center)
dynamicAnimator.addBehavior(itemBehavior)
layouts[indexPath] = layoutAttributes
}
return layoutAttributes
}
override func layoutAttributesForItem(at indexPath: IndexPath) -> UICollectionViewLayoutAttributes?{
guard let currentLayout = layouts[indexPath] else {
return initLayoutAttributesForItem(at:indexPath)}
return currentLayout
}
private let radius = 200
private var decorator: UICollectionViewLayoutAttributes?
override func layoutAttributesForDecorationView(ofKind elementKind: String, at indexPath: IndexPath) -> UICollectionViewLayoutAttributes{
guard let decorator = self.decorator else {
let layoutAttributes = UICollectionViewLayoutAttributes.init(forDecorationViewOfKind: elementKind, with: indexPath)
layoutAttributes.center = (self.collectionView?.center)!
layoutAttributes.bounds = CGRect.init(x: 0, y: 0, width: radius, height: radius)
self.decorator = layoutAttributes
return layoutAttributes
}
return decorator
}
lazy var s: UIDynamicItemBehavior = {
let decorator = self.decorator!
let s = UIDynamicItemBehavior.init(items: [decorator])
s.angularResistance = 1
dynamicAnimator.addBehavior(s)
return s
}()
func rotate(_ speed: CGFloat){
guard let decorator = self.decorator else {return}
s.addAngularVelocity(speed, for: decorator)
}
}
class TestCollectionViewController: UICollectionViewController {
var startLocation = CGPoint.zero
var endLocation = CGPoint.zero
@objc private func gestureReader(_ gesture: UIPanGestureRecognizer) {
let currentLocation = gesture.location(in: self.collectionView)
if gesture.state == .began {
startLocation = currentLocation
}
else if gesture.state == .ended {
endLocation = currentLocation
self.startRotatingView(start: startLocation, end: endLocation)
}
}
private func startRotatingView(start:CGPoint, end: CGPoint) {
let dx = end.x - start.x
let dy = end.y - start.y
let distance = abs(sqrt(dx*dx + dy*dy))
if start.x < end.x {
if start.y > end.y {
//positive value of pi
self.circleAnimation(-distance)
}else {
//negative value of pi
self.circleAnimation(distance)
}
}else {
if start.y > end.y {
//positive value of pi
self.circleAnimation(-distance)
}else {
//negative value of pi
self.circleAnimation(distance)
}
}
}
private func circleAnimation(_ angle:CGFloat) {
(collectionView.collectionViewLayout as? TestCollectionViewLayout).map{
$0.rotate(angle / 100)
}
// UIView.animate(withDuration: 0.7, delay: 0, options: .curveLinear, animations: {
// self.collectionView.transform = CGAffineTransform.identity
// self.collectionView.transform = CGAffineTransform.init(rotationAngle: angle)
// }) { (true) in
// //
// }
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
// Timer.scheduledTimer(withTimeInterval: 1.0, repeats: false) { (Timer) in
// self.rotate()
// }
}
override func viewDidLoad() {
super.viewDidLoad()
collectionView.collectionViewLayout = TestCollectionViewLayout()
collectionView.collectionViewLayout.register(UINib.init(nibName: "S1View", bundle: nil) , forDecorationViewOfKind: "background")
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(self.gestureReader(_:)))
panGesture.cancelsTouchesInView = false
self.collectionView.addGestureRecognizer(panGesture)
}
var data: [Int] = [1,2,3,4,5,6,7]
override func numberOfSections(in collectionView: UICollectionView) -> Int {
return 1
}
override func collectionView(_ collectionView: UICollectionView, numberOfItemsInSection section: Int) -> Int {
return data.count
}
override func collectionView(_ collectionView: UICollectionView, cellForItemAt indexPath: IndexPath) -> UICollectionViewCell {
let cell = collectionView.dequeueReusableCell(withReuseIdentifier: reuseIdentifier, for: indexPath)
return cell
}
}
Maybe this tutorial will help: https://www.raywenderlich.com/1702-uicollectionview-custom-layout-tutorial-a-spinning-wheel
Your first problem is that you are rotating the whole collection view. Think of it like you are putting those circles on a piece of paper and then rotating that piece of paper. You don't want to rotate the whole collection view. You might not want to rotate the circles around a point because then the rotation affects the image and text in the circle. You just want to change the circle's position in a circular movement.
If the UICollectionView isn't working, you could ditch it and use regular UIViews and position them in a circular pattern (These functions should help: https://gist.github.com/akhilcb/8d03f1f88f87e996aec24748bdf0ce78). Once you have the views laid out in a circle then you just need to update the angle for each view as the user drags their finger. Store the previous angle on the view and add to it whatever you want when the user drags their finger. Little bit of trial and error and it shouldn't be too bad.
Update
The main reason to use collection views is when you have a lot of items and need to reuse views, as in a list. If you don't need to reuse views, a UICollectionView can be a pain to understand, customize, and change. Here is a simple example of using regular views that rotate around a circle, driven by UIPanGestureRecognizer input.
Example:
import UIKit
class ViewController: UIViewController {
var rotatingViews = [RotatingView]()
let numberOfViews = 8
var circle = Circle(center: CGPoint(x: 200, y: 200), radius: 100)
var prevLocation = CGPoint.zero
override func viewDidLoad() {
super.viewDidLoad()
for i in 0..<numberOfViews {
let angleBetweenViews = (2 * Double.pi) / Double(numberOfViews)
let viewOnCircle = RotatingView(circle: circle, angle: CGFloat(Double(i) * angleBetweenViews))
rotatingViews.append(viewOnCircle)
view.addSubview(viewOnCircle)
}
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(didPan(panGesture:)))
view.addGestureRecognizer(panGesture)
}
@objc func didPan(panGesture: UIPanGestureRecognizer) {
switch panGesture.state {
case .began:
prevLocation = panGesture.location(in: view)
case .changed, .ended:
let nextLocation = panGesture.location(in: view)
let angle = circle.angleBetween(firstPoint: prevLocation, secondPoint: nextLocation)
rotatingViews.forEach({ $0.updatePosition(angle: angle)})
prevLocation = nextLocation
default: break
}
}
}
struct Circle {
let center: CGPoint
let radius: CGFloat
func pointOnCircle(angle: CGFloat) -> CGPoint {
let x = center.x + radius * cos(angle)
let y = center.y + radius * sin(angle)
return CGPoint(x: x, y: y)
}
func angleBetween(firstPoint: CGPoint, secondPoint: CGPoint) -> CGFloat {
let firstAngle = atan2(firstPoint.y - center.y, firstPoint.x - center.x)
let secondAnlge = atan2(secondPoint.y - center.y, secondPoint.x - center.x)
let angleDiff = (firstAngle - secondAnlge) * -1
return angleDiff
}
}
class RotatingView: UIView {
var currentAngle: CGFloat
let circle: Circle
init(circle: Circle, angle: CGFloat) {
self.currentAngle = angle
self.circle = circle
super.init(frame: CGRect(x: 0, y: 0, width: 60, height: 60))
center = circle.pointOnCircle(angle: currentAngle)
backgroundColor = .blue
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
func updatePosition(angle: CGFloat) {
currentAngle += angle
center = circle.pointOnCircle(angle: currentAngle)
}
}
Circle is a struct that just holds the center of all the views, how far apart you want them (radius), and helper functions for calculating the angles found in the GitHub link above.
RotatingViews are the views that rotate around the middle.

Drag behaviour wrong after UIView transformation

I have a custom DraggableView that subclasses UIImageView. I take a photo with the camera, add the resulting UIImage to a DraggableView and then I can happily drag it around the screen, as intended.
Now, if the original photo was taken in landscape, I rotate it using:
if (image?.size.width > image?.size.height)
{
self.transform = CGAffineTransformMakeRotation(CGFloat(M_PI_2))
}
When I apply this transformation, the drag behaviour still works, but the directions are all wrong - if I drag left, the image moves up, not left. If I drag up, the image moves right, not up.
How do I fix this? I guess it is something to do with the UIPanGestureRecognizer being bound to the non-transformed view?
Edit: Current UIPanGestureRecognizer handler:
func onPhotoDrag(recognizer: UIPanGestureRecognizer?)
{
let translation = recognizer!.translationInView(recognizer?.view)
recognizer!.view!.center = CGPointMake(recognizer!.view!.center.x
+ translation.x, recognizer!.view!.center.y + translation.y);
recognizer?.setTranslation(CGPointZero, inView: recognizer?.view)
if (recognizer!.state == UIGestureRecognizerState.Ended)
{
let velocity = recognizer!.velocityInView(recognizer?.view)
let magnitude = sqrt((velocity.x * velocity.x)
+ (velocity.y * velocity.y))
let slideMult = magnitude / 300;
let slideFactor = 0.1 * slideMult;
let finalPoint = CGPointMake(recognizer!.view!.center.x
+ (velocity.x * slideFactor),
recognizer!.view!.center.y + (velocity.y * slideFactor));
// Animate the drag, and allow the drag delegate to do its work
DraggableView.animateWithDuration(Double(slideFactor),
delay: 0, options: UIViewAnimationOptions.CurveEaseOut,
animations: { recognizer?.view?.center = finalPoint },
completion: {(_) -> Void in self.dragDelegate?.onDragEnd(self)})
} // if: gesture ended
}
Update:
Thanks for posting your code. I pasted your code into my DraggableImageView and reproduced your problem. Although my version was handling the rotated view (without the animation), yours was going sideways.
The difference is that my code asks for the translationInView in the superview of the draggable view. Because the rotation transform is applied to the view itself, a translation reported in the view's own coordinate system comes back rotated as well, while the superview's coordinate system is unaffected. You need to ask for the translationInView and velocityInView in the superview of your draggable view.
Change this line:
let translation = recognizer!.translationInView(recognizer?.view)
to:
let translation = recognizer!.translationInView(recognizer?.view?.superview)
and change this:
let velocity = recognizer!.velocityInView(recognizer?.view)
to:
let velocity = recognizer!.velocityInView(recognizer?.view?.superview)
and all will be happy.
Previous Answer:
Try this version:
class DraggableImageView: UIImageView {
override var image: UIImage? {
didSet {
if (image?.size.width > image?.size.height)
{
self.transform = CGAffineTransformMakeRotation(CGFloat(M_PI_2))
}
}
}
override init(frame: CGRect) {
super.init(frame: frame)
self.setup()
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
self.setup()
}
func setup() {
self.userInteractionEnabled = true
let panGestureRecognizer = UIPanGestureRecognizer()
panGestureRecognizer.addTarget(self, action: #selector(draggedView(_:)))
self.addGestureRecognizer(panGestureRecognizer)
}
func moveByDeltaX(deltaX: CGFloat, deltaY: CGFloat) {
self.center.x += deltaX
self.center.y += deltaY
}
func draggedView(sender:UIPanGestureRecognizer) {
if let dragView = sender.view as? DraggableImageView, superview = dragView.superview {
superview.bringSubviewToFront(dragView)
let translation = sender.translationInView(superview)
sender.setTranslation(CGPointZero, inView: superview)
dragView.moveByDeltaX(translation.x, deltaY: translation.y)
}
}
}
Use example:
override func viewDidLoad() {
super.viewDidLoad()
let dragView = DraggableImageView(frame: CGRect(x: 50, y: 50, width: 96, height: 128))
dragView.image = UIImage(named: "landscapeImage.png")
self.view.addSubview(dragView)
}

Display image without scaling to view

In the following code, even small images are enlarged and fill the whole view. Could you please help me display the image at its original size?
override func drawRect(rect: CGRect) {
    if let inputCIImage = inputCIImage {
        clampFilter.setValue(inputCIImage, forKey: kCIInputImageKey)
        blurFilter.setValue(clampFilter.outputImage!, forKey: kCIInputImageKey)
        let rect = CGRect(x: 0, y: 0, width: drawableWidth, height: drawableHeight)
        ciContext.drawImage(blurFilter.outputImage!, inRect: rect, fromRect: inputCIImage.extent)
    }
}
I figured it out the following way:
func aspectFit(fromRect: CGRect, toRect: CGRect) -> CGRect {
let fromAspectRatio = fromRect.size.width / fromRect.size.height;
let toAspectRatio = toRect.size.width / toRect.size.height;
var fitRect = toRect
if (fromAspectRatio > toAspectRatio) {
fitRect.size.height = toRect.size.width / fromAspectRatio;
fitRect.origin.y += (toRect.size.height - fitRect.size.height) * 0.5;
} else {
fitRect.size.width = toRect.size.height * fromAspectRatio;
fitRect.origin.x += (toRect.size.width - fitRect.size.width) * 0.5;
}
return CGRectIntegral(fitRect)
}
func aspectFill(fromRect: CGRect, toRect: CGRect) -> CGRect {
let fromAspectRatio = fromRect.size.width / fromRect.size.height;
let toAspectRatio = toRect.size.width / toRect.size.height;
var fitRect = toRect
if (fromAspectRatio > toAspectRatio) {
fitRect.size.width = toRect.size.height * fromAspectRatio;
fitRect.origin.x += (toRect.size.width - fitRect.size.width) * 0.5;
} else {
fitRect.size.height = toRect.size.width / fromAspectRatio;
fitRect.origin.y += (toRect.size.height - fitRect.size.height) * 0.5;
}
return CGRectIntegral(fitRect)
}
func imageBoundsForContentMode(fromRect: CGRect, toRect: CGRect) -> CGRect {
switch contentMode {
case .ScaleAspectFill:
return aspectFill(fromRect, toRect: toRect)
case .ScaleAspectFit:
return aspectFit(fromRect, toRect: toRect)
default:
return fromRect
}
}
override func drawRect(rect: CGRect) {
if let inputCIImage = inputCIImage {
clampFilter.setValue(inputCIImage, forKey: kCIInputImageKey)
blurFilter.setValue(clampFilter.outputImage!, forKey: kCIInputImageKey)
let rect = CGRect(x: 0, y: 0, width: drawableWidth, height: drawableHeight)
let inputBounds = inputCIImage.extent
let targetBounds = imageBoundsForContentMode(inputBounds, toRect: rect)
ciContext.drawImage(blurFilter.outputImage!, inRect: targetBounds, fromRect: inputCIImage.extent)
}
}
You can also get the size of the image. If your image is
let image = UIImage(named: "someImage.png")
then you can get its size with
print(image?.size)
and pass that size to your custom view so it draws the image at the size you want.
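Building on that, a minimal sketch of a drawRect that draws the image at its original size, centred in the drawable, instead of stretching it (assuming the same ciContext, filters, drawableWidth, and drawableHeight as the question's code; the target rect is sized from the image's own extent):
override func drawRect(rect: CGRect) {
    guard let inputCIImage = inputCIImage else { return }
    clampFilter.setValue(inputCIImage, forKey: kCIInputImageKey)
    blurFilter.setValue(clampFilter.outputImage!, forKey: kCIInputImageKey)

    // Centre the image at its native pixel size instead of filling the drawable.
    let imageSize = inputCIImage.extent.size
    let targetRect = CGRect(x: (CGFloat(drawableWidth) - imageSize.width) / 2,
                            y: (CGFloat(drawableHeight) - imageSize.height) / 2,
                            width: imageSize.width,
                            height: imageSize.height)
    ciContext.drawImage(blurFilter.outputImage!, inRect: targetRect, fromRect: inputCIImage.extent)
}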
