SKShapeNode fillTexture with rotation - iOS

I am having trouble figuring out how the fillTexture property of an SKShapeNode behaves when the node rotates. In this example I create a fan of 4 blades rotating around the center. The applied texture, however, varies in intensity as the blades rotate. Why? I want the gradient texture to be invariant under rotation.
func buildBackground() {
    let bg = SKShapeNode.init(rect: UIScreen.main.bounds)
    bg.fillColor = .orange
    self.addChild(bg) // self == SKScene
    let size = UIScreen.main.bounds.size
    let fan = SKNode.init()
    let H = size.height
    let W = size.width/2
    let blades = 4
    let triangle = CGMutablePath.init()
    triangle.move(to: .zero)
    triangle.addLine(to: CGPoint(x: -W, y: H))
    triangle.addLine(to: CGPoint(x: +W, y: H))
    triangle.addLine(to: .zero)
    triangle.closeSubpath()
    let da = 360.0/CGFloat(blades)
    var r : CGFloat = 0
    let texture = SKTexture.init(size: size, color0: .white, color1: .clear, radius0: 0.25, radius1: 1)
    for _ in 0..<blades {
        let blade = SKShapeNode.init(path: triangle)
        fan.addChild(blade)
        blade.fillTexture = texture
        blade.lineWidth = 0
        blade.fillColor = .black
        blade.zRotation = r.degreesToRadians
        r += da
    }
    bg.addChild(fan)
    fan.position = bg.frame.center
    fan.run(.repeatForever(.rotate(byAngle: -0.1, duration: 0.2)))
    fan.alpha = 0.5
}
public extension SKTexture {
    convenience init(size : CGSize,
                     color0 : SKColor,
                     color1 : SKColor,
                     radius0 : CGFloat, // fraction of size.diagonal/2, in 0...1
                     radius1 : CGFloat) // fraction of size.diagonal/2, in 0...1
    {
        let coreImageContext = CIContext(options: nil)
        let gradientFilter = CIFilter(name: "CIRadialGradient")!
        let inputCenter = CIVector.init(x: size.width/2, y: size.height/2)
        let inputRadius0 = radius0 * size.diagonal/2
        let inputRadius1 = radius1 * size.diagonal/2
        gradientFilter.setDefaults()
        gradientFilter.setValue(inputCenter, forKey: "inputCenter")
        gradientFilter.setValue(inputRadius0, forKey: "inputRadius0")
        gradientFilter.setValue(inputRadius1, forKey: "inputRadius1")
        gradientFilter.setValue(color0.asCIColor, forKey: "inputColor0")
        gradientFilter.setValue(color1.asCIColor, forKey: "inputColor1")
        let coreImage = coreImageContext.createCGImage(gradientFilter.outputImage!,
                                                       from: CGRect(origin: .zero, size: size))
        self.init(cgImage: coreImage!)
    }
}
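Note that the snippet relies on a few small helper extensions (degreesToRadians, diagonal, center, asCIColor) that are not shown. A minimal sketch of plausible implementations, assuming the obvious semantics:

import SpriteKit

extension CGFloat {
    // Degrees-to-radians conversion used for blade.zRotation.
    var degreesToRadians: CGFloat { return self * .pi / 180 }
}

extension CGSize {
    // Diagonal length, used to scale the gradient radii.
    var diagonal: CGFloat { return (width * width + height * height).squareRoot() }
}

extension CGRect {
    // Center point, used to position the fan.
    var center: CGPoint { return CGPoint(x: midX, y: midY) }
}

extension SKColor {
    // Bridge to Core Image's color type for the gradient filter (on iOS, SKColor == UIColor).
    var asCIColor: CIColor { return CIColor(color: self) }
}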

Related

CoreImageContext's CreateCGImage producing wrong CGRect

Code:
enum GradientDirection {
    case up
    case left
    case upLeft
    case upRight
}

extension SKTexture {
    convenience init(size: CGSize, color1: CIColor, color2: CIColor, direction: GradientDirection = .up) {
        let coreImageContext = CIContext(options: nil)
        let gradientFilter = CIFilter(name: "CILinearGradient")!
        gradientFilter.setDefaults()
        var startVector: CIVector
        var endVector: CIVector
        switch direction {
        case .up:
            startVector = CIVector(x: size.width/2, y: 0)
            endVector = CIVector(x: size.width/2, y: size.height)
        case .left:
            startVector = CIVector(x: size.width, y: size.height/2)
            endVector = CIVector(x: 0, y: size.height/2)
        case .upLeft:
            startVector = CIVector(x: size.width, y: 0)
            endVector = CIVector(x: 0, y: size.height)
        case .upRight:
            startVector = CIVector(x: 0, y: 0)
            endVector = CIVector(x: size.width, y: size.height)
        }
        gradientFilter.setValue(startVector, forKey: "inputPoint0")
        gradientFilter.setValue(endVector, forKey: "inputPoint1")
        gradientFilter.setValue(color1, forKey: "inputColor0")
        gradientFilter.setValue(color2, forKey: "inputColor1")
        let imgRect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        let cgimg = coreImageContext.createCGImage(gradientFilter.outputImage!, from: imgRect)!
        print("cgimg: ", cgimg) // *** Observe this output: 103.0 width and height ***
        self.init(cgImage: cgimg)
    }
}
Calling Initializer:
// e.g. CGSize(width: 102.69999694824219, height: 102.69999694824219)
let textureSize = CGSize(width: self.frame.width, height: self.frame.height)
let shapeTexture = SKTexture(size: textureSize, color1: bottomColor, color2: topColor, direction: .upRight)
Passing a width/height of 102.69999694824219 produces a shapeTexture with width/height 103.
It seems that coreImageContext.createCGImage is rounding 102.69999694824219 up to 103.0.
This results in minor unexpected output. How can I bypass this rounding? Or is there any other method to generate a gradient image for nodes?
More Code:
class BubbleNode: SKShapeNode {
    private var backgroundNode: SKCropNode!
    var label: SKLabelNode!
    private var state: BubbleNodeState!
    private let BubbleAnimationDuration = 0.2
    private let BubbleIconPercentualInset = 0.4

    var model: BubbleModel! {
        didSet {
            self.label.text = model.name
        }
    }

    override init() {
        super.init()
    }

    convenience init(withRadius radius: CGFloat) {
        self.init(circleOfRadius: radius)
        state = .normal
        self.configure()
    }

    private func configure() {
        self.name = "mybubble"
        physicsBody = SKPhysicsBody(circleOfRadius: 4 + self.path!.boundingBox.size.width / 2.0)
        physicsBody!.isDynamic = true
        physicsBody!.affectedByGravity = false
        physicsBody!.allowsRotation = false
        physicsBody!.mass = 0.3
        physicsBody!.friction = 0.0
        physicsBody!.linearDamping = 3
        backgroundNode = SKCropNode()
        backgroundNode.isUserInteractionEnabled = false
        backgroundNode.position = CGPoint.zero
        backgroundNode.zPosition = 0
        self.addChild(backgroundNode)
        label = SKLabelNode(fontNamed: "")
        label.preferredMaxLayoutWidth = self.frame.size.width - 16
        label.numberOfLines = 0
        label.position = CGPoint.zero
        label.fontColor = .white
        label.fontSize = 10
        label.isUserInteractionEnabled = false
        label.verticalAlignmentMode = .center
        label.horizontalAlignmentMode = .center
        label.zPosition = 2
        self.addChild(label)
    }

    func addGradientNode(withRadius radius: CGFloat) {
        let gradientNode = SKShapeNode(path: self.path!)
        gradientNode.zPosition = 1
        gradientNode.fillColor = .white
        gradientNode.strokeColor = .clear
        let bottomColor = CIColor(red: 0.922, green: 0.256, blue: 0.523, alpha: 1)
        let topColor = CIColor(red: 0.961, green: 0.364, blue: 0.155, alpha: 1)
        let textureSize = CGSize(width: self.frame.width, height: self.frame.height)
        let shapeTexture = SKTexture(size: textureSize, color1: bottomColor, color2: topColor, direction: .upRight)
        gradientNode.fillTexture = shapeTexture
        self.addChild(gradientNode)
        print("path: ", self.path!)
        print("textureSize: ", textureSize)
        print("shapeTexture: ", shapeTexture)
    }
}
Any image/texture will always have integer pixel dimensions, since there are no sub-pixels in memory. So frameworks like Core Image will always round the given size up to the next integer value.
In contrast, the frame of a view is given in points, which need to be multiplied by the view's contentScaleFactor to get the actual pixel size (which is what you should use to generate your gradient). UIKit also allows sub-pixel frame sizes, but under the hood it too rounds up when rendering views to the screen.
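Following that advice, here is a minimal sketch of generating the texture at whole-pixel size, reusing the names from the question (the scale handling is the only addition, and it assumes UIScreen.main.scale matches the view's contentScaleFactor):

// Convert the point size to whole pixels before generating the gradient.
let scale = UIScreen.main.scale
let pixelSize = CGSize(width: (textureSize.width * scale).rounded(.up),
                       height: (textureSize.height * scale).rounded(.up))
let shapeTexture = SKTexture(size: pixelSize, color1: bottomColor, color2: topColor, direction: .upRight)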

CALayer live blur inside AVVideoCompositionCoreAnimationTool

I am trying to come up with a way to perform a live blur during an AVVideoCompositionCoreAnimationTool export. I have tried using a UIVisualEffectView and stealing the layer of its underlying view. It works in preview, but as soon as you use it inside AVVideoCompositionCoreAnimationTool the layer renders black. So I started building a CALayer that does this myself, but it is not updating often enough. What can I do to make it draw more often, or what else might work for combining AVVideoCompositionCoreAnimationTool with a live blur on iOS? Here is the layer I built.
class CABlurLayer: CALayer {
    let maxBlurRadius: CGFloat = 20
    var currentImageIndex: Float = 0
    var blur: Int = 10
    var context: CGContext?
    var link: Timer?
    var snap: UIImage?
    var targetLayer: CALayer?

    override init() {
        super.init()
    }

    convenience init(targetLayer: CALayer?) {
        self.init()
        self.targetLayer = targetLayer
        self.drawsAsynchronously = true
        if let tl = targetLayer {
            self.masksToBounds = tl.masksToBounds
        }
        updateSnapShots()
        link = Timer.scheduledTimer(timeInterval: 1/60, target: self, selector: #selector(updateBlur), userInfo: nil, repeats: true)
    }

    @objc func updateBlur() {
        updateSnapShots()
        DispatchQueue.main.async {
            self.setNeedsDisplay()
        }
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    func updateSnapShots() {
        guard let tl = targetLayer else { return }
        UIGraphicsBeginImageContextWithOptions(self.bounds.size, false, 0)
        guard let ctx = UIGraphicsGetCurrentContext() else { return }
        tl.render(in: ctx)
        let snapshot = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        snap = snapshot?.applyBlurWithRadius(CGFloat(blur), tintColor: UIColor().withAlphaComponent(0), saturationDeltaFactor: 1.4)
    }

    override func draw(in ctx: CGContext) {
        guard let blurredImage = snap,
              let tl = targetLayer else { return }
        var origin = tl.frame.origin
        if let pres = tl.presentation() {
            origin = pres.frame.origin
        }
        UIGraphicsPushContext(ctx)
        blurredImage.draw(at: origin)
        UIGraphicsPopContext()
    }
}
class MyViewController: UIViewController {
    override func loadView() {
        let view = UIView()
        view.backgroundColor = .white
        self.view = view
        let ur = URL(string: "https://images.pexels.com/photos/457882/pexels-photo-457882.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500")
        URLSession.shared.dataTask(with: ur!) { (dt, response, error) in
            if let data = dt {
                print("we have a response")
                let img = UIImage(data: data)
                DispatchQueue.main.async {
                    let layer = CALayer()
                    layer.frame = CGRect(x: 0, y: 0, width: 500, height: 500)
                    view.layer.addSublayer(layer)
                    let imageLayer = CALayer()
                    imageLayer.masksToBounds = true
                    imageLayer.frame = CGRect(x: 0, y: 150, width: 400, height: 300)
                    imageLayer.contentsGravity = .resizeAspectFill
                    imageLayer.contents = img?.cgImage
                    layer.addSublayer(imageLayer)
                    let blur = CABlurLayer(targetLayer: imageLayer)
                    blur.frame = layer.bounds
                    layer.addSublayer(blur)
                    blur.blur = 20
                    let pos = CABasicAnimation(keyPath: "position.x")
                    pos.toValue = imageLayer.position.x
                    pos.fromValue = imageLayer.position.x - 100
                    pos.duration = 2
                    pos.repeatCount = 100
                    pos.autoreverses = true
                    imageLayer.add(pos, forKey: nil)
                }
            }
        }.resume()
    }
}

// Present the view controller in the Live View window
PlaygroundPage.current.liveView = MyViewController()
PlaygroundPage.current.needsIndefiniteExecution = true
UIImage Extensions
import UIKit
import Accelerate

public extension UIImage {

    func applyLightEffect() -> UIImage? {
        return applyBlurWithRadius(30, tintColor: UIColor(white: 1.0, alpha: 0.3), saturationDeltaFactor: 1.8)
    }

    func applyExtraLightEffect() -> UIImage? {
        return applyBlurWithRadius(20, tintColor: UIColor(white: 0.97, alpha: 0.82), saturationDeltaFactor: 1.8)
    }

    func applyDarkEffect() -> UIImage? {
        return applyBlurWithRadius(20, tintColor: UIColor(white: 0.11, alpha: 0.73), saturationDeltaFactor: 1.8)
    }

    func applyTintEffectWithColor(_ tintColor: UIColor) -> UIImage? {
        let effectColorAlpha: CGFloat = 0.6
        var effectColor = tintColor
        let componentCount = tintColor.cgColor.numberOfComponents
        if componentCount == 2 {
            var b: CGFloat = 0
            if tintColor.getWhite(&b, alpha: nil) {
                effectColor = UIColor(white: b, alpha: effectColorAlpha)
            }
        } else {
            var red: CGFloat = 0
            var green: CGFloat = 0
            var blue: CGFloat = 0
            if tintColor.getRed(&red, green: &green, blue: &blue, alpha: nil) {
                effectColor = UIColor(red: red, green: green, blue: blue, alpha: effectColorAlpha)
            }
        }
        return applyBlurWithRadius(10, tintColor: effectColor, saturationDeltaFactor: -1.0, maskImage: nil)
    }

    func applyBlurWithRadius(_ blurRadius: CGFloat, tintColor: UIColor?, saturationDeltaFactor: CGFloat, maskImage: UIImage? = nil) -> UIImage? {
        // Check pre-conditions.
        if size.width < 1 || size.height < 1 {
            print("*** error: invalid size: \(size.width) x \(size.height). Both dimensions must be >= 1: \(self)")
            return nil
        }
        guard let cgImage = self.cgImage else {
            print("*** error: image must be backed by a CGImage: \(self)")
            return nil
        }
        if maskImage != nil && maskImage!.cgImage == nil {
            print("*** error: maskImage must be backed by a CGImage: \(String(describing: maskImage))")
            return nil
        }
        let epsilon = CGFloat(Float.ulpOfOne)
        let screenScale = UIScreen.main.scale
        let imageRect = CGRect(origin: .zero, size: size)
        var effectImage = self
        let hasBlur = blurRadius > epsilon
        let hasSaturationChange = abs(saturationDeltaFactor - 1.0) > epsilon
        if hasBlur || hasSaturationChange {
            func createEffectBuffer(_ context: CGContext) -> vImage_Buffer {
                let data = context.data
                let width = vImagePixelCount(context.width)
                let height = vImagePixelCount(context.height)
                let rowBytes = context.bytesPerRow
                return vImage_Buffer(data: data, height: height, width: width, rowBytes: rowBytes)
            }
            UIGraphicsBeginImageContextWithOptions(size, false, screenScale)
            guard let effectInContext = UIGraphicsGetCurrentContext() else { return nil }
            effectInContext.scaleBy(x: 1.0, y: -1.0)
            effectInContext.translateBy(x: 0, y: -size.height)
            effectInContext.draw(cgImage, in: imageRect)
            var effectInBuffer = createEffectBuffer(effectInContext)
            UIGraphicsBeginImageContextWithOptions(size, false, screenScale)
            guard let effectOutContext = UIGraphicsGetCurrentContext() else { return nil }
            var effectOutBuffer = createEffectBuffer(effectOutContext)
            if hasBlur {
                // A description of how to compute the box kernel width from the Gaussian
                // radius (aka standard deviation) appears in the SVG spec:
                // http://www.w3.org/TR/SVG/filters.html#feGaussianBlurElement
                //
                // For larger values of 's' (s >= 2.0), an approximation can be used: Three
                // successive box-blurs build a piece-wise quadratic convolution kernel, which
                // approximates the Gaussian kernel to within roughly 3%.
                //
                // let d = floor(s * 3*sqrt(2*pi)/4 + 0.5)
                //
                // ... if d is odd, use three box-blurs of size 'd', centered on the output pixel.
                //
                let inputRadius = blurRadius * screenScale
                let d = floor(inputRadius * 3.0 * CGFloat(sqrt(2 * Double.pi)) / 4 + 0.5)
                var radius = UInt32(d)
                if radius % 2 != 1 {
                    radius += 1 // force radius to be odd so that the three box-blur methodology works.
                }
                let imageEdgeExtendFlags = vImage_Flags(kvImageEdgeExtend)
                vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, nil, 0, 0, radius, radius, nil, imageEdgeExtendFlags)
                vImageBoxConvolve_ARGB8888(&effectOutBuffer, &effectInBuffer, nil, 0, 0, radius, radius, nil, imageEdgeExtendFlags)
                vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, nil, 0, 0, radius, radius, nil, imageEdgeExtendFlags)
            }
            var effectImageBuffersAreSwapped = false
            if hasSaturationChange {
                let s: CGFloat = saturationDeltaFactor
                let floatingPointSaturationMatrix: [CGFloat] = [
                    0.0722 + 0.9278 * s, 0.0722 - 0.0722 * s, 0.0722 - 0.0722 * s, 0,
                    0.7152 - 0.7152 * s, 0.7152 + 0.2848 * s, 0.7152 - 0.7152 * s, 0,
                    0.2126 - 0.2126 * s, 0.2126 - 0.2126 * s, 0.2126 + 0.7873 * s, 0,
                    0, 0, 0, 1
                ]
                let divisor: CGFloat = 256
                let matrixSize = floatingPointSaturationMatrix.count
                var saturationMatrix = [Int16](repeating: 0, count: matrixSize)
                for i in 0 ..< matrixSize {
                    saturationMatrix[i] = Int16(round(floatingPointSaturationMatrix[i] * divisor))
                }
                if hasBlur {
                    vImageMatrixMultiply_ARGB8888(&effectOutBuffer, &effectInBuffer, saturationMatrix, Int32(divisor), nil, nil, vImage_Flags(kvImageNoFlags))
                    effectImageBuffersAreSwapped = true
                } else {
                    vImageMatrixMultiply_ARGB8888(&effectInBuffer, &effectOutBuffer, saturationMatrix, Int32(divisor), nil, nil, vImage_Flags(kvImageNoFlags))
                }
            }
            if !effectImageBuffersAreSwapped {
                effectImage = UIGraphicsGetImageFromCurrentImageContext()!
            }
            UIGraphicsEndImageContext()
            if effectImageBuffersAreSwapped {
                effectImage = UIGraphicsGetImageFromCurrentImageContext()!
            }
            UIGraphicsEndImageContext()
        }
        // Set up output context.
        UIGraphicsBeginImageContextWithOptions(size, false, screenScale)
        guard let outputContext = UIGraphicsGetCurrentContext() else { return nil }
        outputContext.scaleBy(x: 1.0, y: -1.0)
        outputContext.translateBy(x: 0, y: -size.height)
        // Draw base image.
        outputContext.draw(cgImage, in: imageRect)
        // Draw effect image.
        if hasBlur {
            outputContext.saveGState()
            if let maskCGImage = maskImage?.cgImage {
                outputContext.clip(to: imageRect, mask: maskCGImage)
            }
            outputContext.draw(effectImage.cgImage!, in: imageRect)
            outputContext.restoreGState()
        }
        // Add in color tint.
        if let color = tintColor {
            outputContext.saveGState()
            outputContext.setFillColor(color.cgColor)
            outputContext.fill(imageRect)
            outputContext.restoreGState()
        }
        // Output image is ready.
        let outputImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return outputImage
    }

    func blurImage() -> UIImage? {
        return self.applyBlurWithRadius(20, tintColor: UIColor().withAlphaComponent(0), saturationDeltaFactor: 1.4)
    }
}
This has been around a while, so I thought I would share my solution. I stole the CABackdropLayer from a UIVisualEffectView to achieve a live blur. You can init a layer of this type directly, but it is private. However, since a public view uses this layer and I am just taking it from that view, I am not accessing the private API in a direct way.
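The answer doesn't show the extraction itself, but it might look roughly like this. A minimal sketch, assuming the backdrop layer can be identified by its class name among the effect view's sublayers (the name check is an implementation detail, not documented API, and may break in future iOS versions):

import UIKit

// Borrow the private CABackdropLayer from a UIVisualEffectView.
func liveBlurLayer(style: UIBlurEffect.Style = .regular) -> CALayer? {
    let effectView = UIVisualEffectView(effect: UIBlurEffect(style: style))
    // The blur is performed by a private backdrop layer inside the effect view;
    // find it by class name instead of instantiating the private class directly.
    return effectView.layer.sublayers?.first {
        String(describing: type(of: $0)).contains("Backdrop")
    }
}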

Cut rounded image with the face from CIDetector and CIFaceFeature

How do I cut the frame that I receive as faceViewBounds to make a big circle around the face? It's like a badge with the face of the person.
Maybe I should get the center of faceViewBounds, then find that center in theImageView.image, draw a circle with a big diameter, and then cut away everything outside the circle, but I don't know how to do that in code. Any suggestions?
func detectFaceFrom(ImageView theImageView: UIImageView) {
    guard let personImage = CIImage(image: theImageView.image!) else {
        return
    }
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personImage)
    let ciImageSize = personImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)
    if faces?.count == 1 {
        for face in faces as! [CIFaceFeature] {
            var faceViewBounds = face.bounds.applying(transform)
            let viewSize = theImageView.bounds.size
            let scale = min(viewSize.width / ciImageSize.width,
                            viewSize.height / ciImageSize.height)
            let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
            let offsetY = (viewSize.height - ciImageSize.height * scale) / 2
            faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY
            let faceBox = UIView(frame: faceViewBounds)
            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.green.cgColor
            faceBox.backgroundColor = UIColor.clear
            drawCircleFromCenter(faceViewBounds.center ???
        }
        return cuttedCircleWithFace
    } else {
        return theImageView.image
    }
}
I just saw an ad on Facebook with the exact same thing I want to accomplish:
The problem is that you should use your image's size instead of theImageView.bounds.size. You should also pass the CIDetectorImageOrientation option when detecting features.
extension UIImage {
    var faces: [UIImage] {
        guard let ciimage = CIImage(image: self) else { return [] }
        var orientation: NSNumber {
            switch imageOrientation {
            case .up:            return 1
            case .upMirrored:    return 2
            case .down:          return 3
            case .downMirrored:  return 4
            case .leftMirrored:  return 5
            case .right:         return 6
            case .rightMirrored: return 7
            case .left:          return 8
            }
        }
        return CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyLow])?
            .features(in: ciimage, options: [CIDetectorImageOrientation: orientation])
            .compactMap {
                let rect = $0.bounds.insetBy(dx: -10, dy: -10)
                UIGraphicsBeginImageContextWithOptions(rect.size, false, scale)
                defer { UIGraphicsEndImageContext() }
                UIImage(ciImage: ciimage.cropped(to: rect)).draw(in: CGRect(origin: .zero, size: rect.size))
                guard let face = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
                // now that you have your face image you need to properly apply a circle mask to it
                let size = face.size
                let breadth = min(size.width, size.height)
                let breadthSize = CGSize(width: breadth, height: breadth)
                UIGraphicsBeginImageContextWithOptions(breadthSize, false, scale)
                defer { UIGraphicsEndImageContext() }
                let cropOrigin = CGPoint(x: size.width > size.height ? (size.width - size.height).rounded(.down) / 2 : 0,
                                         y: size.height > size.width ? (size.height - size.width).rounded(.down) / 2 : 0)
                guard let cgImage = face.cgImage?.cropping(to: CGRect(origin: cropOrigin, size: breadthSize))
                else { return nil }
                let faceRect = CGRect(origin: .zero, size: breadthSize)
                UIBezierPath(ovalIn: faceRect).addClip()
                UIImage(cgImage: cgImage).draw(in: faceRect)
                return UIGraphicsGetImageFromCurrentImageContext()
            } ?? []
    }
}
let profilePicture = UIImage(data: try! Data(contentsOf: URL(string:"http://i.stack.imgur.com/Xs4RX.jpg")!))!
if let face = profilePicture.faces.first {
    print(face.size)
}
If you just want to focus on a face inside an image, you should first set up an image view and mask it into a circle:
let image = UIImage(named: "face.jpg")
let imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: 50.0, height: 50.0))
imageView.image = image
imageView.contentMode = .scaleAspectFill
imageView.layer.cornerRadius = imageView.bounds.height * 0.5
imageView.layer.masksToBounds = true
Next, you run the CIDetector:
func focusOnFace(in imageView: UIImageView) {
    guard let image = imageView.image,
          var personImage = CIImage(image: image) else { return }
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    // This will just take the first detected face but you can do something more sophisticated
    guard let face = faceDetector?.features(in: personImage).first as? CIFaceFeature else { return }
    // Make the facial rect a square so it will mask nicely to a circle
    // (may not be strictly necessary, as `CIFaceFeature` bounds is typically a square)
    var rect = face.bounds
    rect.size.height = max(face.bounds.height, face.bounds.width)
    rect.size.width = max(face.bounds.height, face.bounds.width)
    rect = rect.insetBy(dx: -30, dy: -30) // Adds padding around the face so it's not so tightly cropped
    // Crop to the face detected
    personImage = personImage.cropped(to: rect)
    // Set the new cropped image as the image view image
    imageView.image = UIImage(ciImage: personImage)
}
Example
(Before-and-after screenshots of running focusOnFace, plus an updated example, omitted.)

Position of a CALayer after changing bounds size

I have a CATextLayer with a CGAffineTransform applied. Its bounds are the same as the parent's bounds.
If I change the bounds size to the text size, the position of the layer also changes.
The red text is the layer without the bounds change.
How do I calculate the position after changing the bounds size? (green text)
Here is the code from a Playground:
import Cocoa

let frame = CGRect(origin: .zero, size: CGSize(width: 1200, height: 800))
let transform = CGAffineTransform(a: 0.811281, b: 0.584656, c: -0.484656, d: 0.611281, tx: 420.768, ty: -170.049)

func text(with color: NSColor, apply: Bool = false) -> CATextLayer {
    let layer = CATextLayer()
    let text = NSAttributedString(string: "valami",
                                  attributes: [.foregroundColor: color,
                                               .font: NSFont.boldSystemFont(ofSize: 180)])
    layer.string = text
    layer.frame = frame
    layer.bounds = frame
    if apply {
        layer.bounds.size = text.size()
        // let translate = CGAffineTransform(translationX: -408.5, y: -12.8)
        // let concate = transform.concatenating(translate)
        // layer.setAffineTransform(concate)
        layer.setAffineTransform(transform)
    }
    let border = CAShapeLayer()
    border.path = CGPath(rect: layer.bounds, transform: nil)
    border.fillColor = nil
    border.strokeColor = color.cgColor
    layer.addSublayer(border)
    return layer
}
let background = CAShapeLayer()
background.path = CGPath(rect: frame, transform: nil)
background.fillColor = NSColor.white.cgColor
background.strokeColor = nil
let textLayer1 = text(with: .red)
let textLayer2 = text(with: NSColor.green.withAlphaComponent(0.5), apply: true)
let group = CALayer()
group.addSublayer(textLayer1)
group.bounds = frame
group.frame = frame
group.setAffineTransform(transform)
let view = NSView(frame: frame)
view.wantsLayer = true
view.layer = CALayer()
view.layer?.addSublayer(background)
view.layer?.addSublayer(group)
view.layer?.addSublayer(textLayer2)
view
OK, I figured it out!
This is the working apply block:
if apply {
    let textSize = text.size()
    // The layer's anchorPoint stays at the center, so shrinking the bounds
    // moves the visible text; compensate by translating by half the size change,
    // mapped through the transform's basis vectors (a, b) and (c, d).
    let widthChange = layer.bounds.width - textSize.width
    let heightChange = layer.bounds.height - textSize.height
    let anx = ((transform.c / 2) * heightChange) - ((transform.a / 2) * widthChange)
    let any = ((transform.d / 2) * heightChange) - ((transform.b / 2) * widthChange)
    let translate = CGAffineTransform(translationX: anx, y: any)
    let concate = transform.concatenating(translate)
    layer.setAffineTransform(concate)
    layer.bounds.size = textSize
}
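In general terms: when a center-anchored layer's bounds change by (Δw, Δh), the compensating translation is the vector (-Δw/2, Δh/2) pushed through the linear part of the transform. A small helper expressing the same formula (the function name is mine, not from the post):

import CoreGraphics

// Translation that keeps a center-anchored layer visually in place
// when its bounds change by (dw, dh) under `transform`.
func compensatingTranslation(for transform: CGAffineTransform,
                             dw: CGFloat, dh: CGFloat) -> CGAffineTransform {
    let tx = (transform.c / 2) * dh - (transform.a / 2) * dw
    let ty = (transform.d / 2) * dh - (transform.b / 2) * dw
    return CGAffineTransform(translationX: tx, y: ty)
}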

Live Rotation of SCNNode

I want my node to rotate on the Y axis while the camera is rotating.
class ArtScene: SCNScene {
    var image = UIImage()
    var geometry = SCNBox()
    var motion = MotionKit()
    var node = SCNNode()

    convenience init(create: Bool) {
        self.init()
        image = UIImage(named: "sf")!
        let length: CGFloat = 50
        let width: CGFloat = self.image.size.width
        let height: CGFloat = self.image.size.height
        self.geometry = SCNBox(width: width / 1000, height: height / 1000, length: length / 1000, chamferRadius: 0.005)
        print("Geometry WIDTH: \(self.geometry.width)")
        print("Geometry LENGTH: \(self.geometry.length)")
        self.geometry.firstMaterial?.diffuse.contents = UIColor.red
        self.geometry.firstMaterial?.specular.contents = UIColor.white
        self.geometry.firstMaterial?.emission.contents = UIColor.blue
        node = SCNNode(geometry: self.geometry)
        self.rootNode.addChildNode(node)
    }

    func rotate() {
        motion.getDeviceMotionObject { (deviceMotion) in
            let rotationY = deviceMotion.rotationRate.y
            print(rotationY)
            let rotationAction = SCNAction.rotate(by: CGFloat(1.0), around: SCNVector3(0, rotationY, 0), duration: 2.0)
            self.node.runAction(rotationAction)
        }
    }
}
I can print the device motion, but I can't get the node to rotate at the same time. How can I do it?
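For what it's worth, one common approach (a sketch using plain CoreMotion rather than the MotionKit wrapper from the question) is to drive the node's eulerAngles.y directly from the device attitude on each update, instead of enqueueing overlapping SCNActions whose rotation axis collapses when rotationRate.y is near zero:

import SceneKit
import CoreMotion

let motionManager = CMMotionManager()

func startLiveRotation(of node: SCNNode) {
    guard motionManager.isDeviceMotionAvailable else { return }
    motionManager.deviceMotionUpdateInterval = 1.0 / 60.0
    motionManager.startDeviceMotionUpdates(to: .main) { deviceMotion, _ in
        guard let attitude = deviceMotion?.attitude else { return }
        // Map the device's yaw straight onto the node's Y rotation
        // so the node follows the device continuously.
        node.eulerAngles.y = Float(attitude.yaw)
    }
}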
