Panoramic View iOS

I have built a nice panoramic viewer, but the camera points downward: if I hold the phone flat, as if it were lying on a table, it rotates and looks around the sphere nicely. I want to be able to hold the phone upright and look around. Here is my code.
I have an extension that I am using:
import UIKit
import SceneKit
import CoreMotion
import GLKit // needed for the GLKQuaternion functions below
extension CMDeviceMotion {
func gaze(atOrientation orientation: UIInterfaceOrientation) -> SCNVector4 {
let attitude = self.attitude.quaternion
let aq = GLKQuaternionMake(Float(attitude.x), Float(attitude.y), Float(attitude.z), Float(attitude.w))
let final: SCNVector4
switch orientation { // use the parameter instead of re-reading statusBarOrientation
case .landscapeRight:
let cq = GLKQuaternionMakeWithAngleAndAxis(Float(Double.pi), 0, 1, 0)
let q = GLKQuaternionMultiply(cq, aq)
final = SCNVector4(x: -q.y, y: q.x, z: q.z, w: q.w)
case .landscapeLeft:
let cq = GLKQuaternionMakeWithAngleAndAxis(Float(-Double.pi), 0, 1, 0)
let q = GLKQuaternionMultiply(cq, aq)
final = SCNVector4(x: q.y, y: -q.x, z: q.z, w: q.w)
case .portraitUpsideDown:
let cq = GLKQuaternionMakeWithAngleAndAxis(Float(Double.pi), 1, 0, 0)
let q = GLKQuaternionMultiply(cq, aq)
final = SCNVector4(x: -q.x, y: -q.y, z: q.z, w: q.w)
case .unknown:
fallthrough
case .portrait:
let cq = GLKQuaternionMakeWithAngleAndAxis(Float(-Double.pi), 1, 0, 0)
let q = GLKQuaternionMultiply(cq, aq)
final = SCNVector4(x: q.x, y: q.y, z: q.z, w: q.w)
}
return final
}
}
Then the view controller looks like this:
import UIKit
import SceneKit
import CoreMotion
class ViewController: UIViewController {
let motionManager = CMMotionManager()
let cameraNode = SCNNode()
@IBOutlet weak var sceneView: SCNView!
override func viewDidLoad() {
super.viewDidLoad()
guard let imagePath = Bundle.main.path(forResource: "Hellbrunn25", ofType: "jpg") else {
fatalError("Failed to find path for panaromic file.")
}
guard let image = UIImage(contentsOfFile:imagePath) else {
fatalError("Failed to load panoramic image")
}
let scene = SCNScene()
sceneView.scene = scene
sceneView.allowsCameraControl = true
//Create node, containing a sphere, using the panoramic image as a texture
let sphere = SCNSphere(radius: 20.0)
sphere.firstMaterial!.isDoubleSided = true
sphere.firstMaterial!.diffuse.contents = image
let sphereNode = SCNNode(geometry: sphere)
sphereNode.position = SCNVector3Make(0,0,0)
scene.rootNode.addChildNode(sphereNode)
// Camera, ...
cameraNode.camera = SCNCamera()
cameraNode.position = SCNVector3Make(0, 0, 0)
scene.rootNode.addChildNode(cameraNode)
guard motionManager.isDeviceMotionAvailable else {
fatalError("Device motion is not available")
}
// Action
motionManager.deviceMotionUpdateInterval = 1.0 / 60.0
motionManager.startDeviceMotionUpdates(to: OperationQueue.main, withHandler: {
(deviceMotion, error) -> Void in
guard let attitude = deviceMotion?.attitude else { return }
self.cameraNode.orientation = SCNVector4Make(Float(attitude.quaternion.x), Float(attitude.quaternion.y), Float(attitude.quaternion.z), Float(attitude.quaternion.w))
})
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func deviceDidMove(motion: CMDeviceMotion?, error: NSError?) {
if let motion = motion {
let orientation = motion.gaze(atOrientation: UIApplication.shared.statusBarOrientation)
cameraNode.orientation = orientation
}
}
}
How do I get the camera to face forward on the image? Thanks.

I figured this out, in case anyone else is looking for an answer. I changed the portrait case to the following; rotating by -π/2 about the x-axis (instead of -π) tilts the reference frame a quarter turn, from the camera facing the floor to facing the horizon:
let cq = GLKQuaternionMakeWithAngleAndAxis(Float(-Double.pi*0.5), 1, 0, 0)
let q = GLKQuaternionMultiply(cq, aq)
final = SCNVector4(x: q.x, y: q.y, z: q.z, w: q.w)
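Note the fix only takes effect if the motion handler actually routes through gaze(atOrientation:) rather than assigning the raw attitude quaternion. A minimal sketch of such a handler, assuming the extension above is in scope:
motionManager.deviceMotionUpdateInterval = 1.0 / 60.0
motionManager.startDeviceMotionUpdates(to: OperationQueue.main) { [weak self] motion, _ in
    guard let motion = motion else { return }
    // Apply the per-orientation correction from the gaze(atOrientation:) extension.
    self?.cameraNode.orientation = motion.gaze(atOrientation: UIApplication.shared.statusBarOrientation)
}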

Related

How to calculate regionOfInterest for iOS Vision's VNRequest

We have implemented a scanning app that shows a camera preview at the bottom of the screen, 300 px tall and the full screen width.
How should the regionOfInterest be calculated so that Vision detects barcodes within the camera preview?
We have configured the camera as below:
func setupCamera() {
guard let captureDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .back) else {
print("Could not create capture device.")
return
}
self.captureDevice = captureDevice
if captureDevice.supportsSessionPreset(.hd4K3840x2160) {
captureSession.sessionPreset = AVCaptureSession.Preset.hd4K3840x2160
bufferAspectRatio = 3840.0 / 2160.0
} else {
captureSession.sessionPreset = AVCaptureSession.Preset.hd1920x1080
bufferAspectRatio = 1920.0 / 1080.0
}
guard let deviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
print("Could not create device input.")
return
}
if captureSession.canAddInput(deviceInput) {
captureSession.addInput(deviceInput)
}
// Configure video data output.
videoDataOutput.alwaysDiscardsLateVideoFrames = true
videoDataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange]
if captureSession.canAddOutput(videoDataOutput) {
captureSession.addOutput(videoDataOutput)
videoDataOutput.connection(with: AVMediaType.video)?.preferredVideoStabilizationMode = .off
} else {
print("Could not add VDO output")
return
}
// Set zoom and autofocus to help focus on very small text.
do {
try captureDevice.lockForConfiguration()
//captureDevice.videoZoomFactor = 2
captureDevice.autoFocusRangeRestriction = .near
captureDevice.unlockForConfiguration()
} catch {
print("Could not set zoom level due to error: \(error)")
return
}
captureSession.startRunning()
}
The region of interest is calculated as below:
func calculateRegionOfInterest() {
// Figure out size of ROI.
let size: CGSize = CGSize(width: previewWidth/screenWidth, height: previewHeight/screenHeight) //ratio of preview to screen.
// Make it centered.
regionOfInterest.origin = CGPoint(x: (1 - size.width) / 2, y: (1 - size.height) / 2)
regionOfInterest.size = size
// ROI changed, update transform.
setupOrientationAndTransform()
// Update the cutout to match the new ROI.
DispatchQueue.main.async {
// Wait for the next run cycle before updating the cutout. This
// ensures that the preview layer already has its new orientation.
self.updateCutout()
}
}
func updateCutout() {
// Figure out where the cutout ends up in layer coordinates.
let roiRectTransform = bottomToTopTransform.concatenating(uiRotationTransform)
let cutout = previewView.videoPreviewLayer.layerRectConverted(fromMetadataOutputRect: regionOfInterest.applying(roiRectTransform))
// Create the mask.
let path = UIBezierPath(rect: cutoutView.frame)
path.append(UIBezierPath(rect: cutout))
maskLayer.path = path.cgPath
// Move the number view down to under cutout.
var numFrame = cutout
numFrame.origin.y += numFrame.size.height
numberView.frame = numFrame
}
func setupOrientationAndTransform() {
// Recalculate the affine transform between Vision coordinates and AVF coordinates.
// Compensate for region of interest.
let roi = regionOfInterest
roiToGlobalTransform = CGAffineTransform(translationX: roi.origin.x, y: roi.origin.y).scaledBy(x: roi.width, y: roi.height)
// Compensate for orientation (buffers always come in the same orientation).
switch currentOrientation {
case .landscapeLeft:
textOrientation = CGImagePropertyOrientation.up
uiRotationTransform = CGAffineTransform.identity
case .landscapeRight:
textOrientation = CGImagePropertyOrientation.down
uiRotationTransform = CGAffineTransform(translationX: 1, y: 1).rotated(by: CGFloat.pi)
case .portraitUpsideDown:
textOrientation = CGImagePropertyOrientation.left
uiRotationTransform = CGAffineTransform(translationX: 1, y: 0).rotated(by: CGFloat.pi / 2)
default: // We default everything else to .portraitUp
textOrientation = CGImagePropertyOrientation.right
uiRotationTransform = CGAffineTransform(translationX: 0, y: 1).rotated(by: -CGFloat.pi / 2)
}
// Full Vision ROI to AVF transform.
visionToAVFTransform = roiToGlobalTransform.concatenating(bottomToTopTransform).concatenating(uiRotationTransform)
}
The regionOfInterest and textOrientation are passed to the VNImageRequestHandler as below:
override func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
let request = VNDetectBarcodesRequest()
if #available(iOS 15.0, *) {
request.revision = VNDetectBarcodesRequestRevision2
} else {
// Fallback on earlier versions
request.revision = VNDetectBarcodesRequestRevision1
}
// Only run on the region of interest for maximum speed.
request.regionOfInterest = regionOfInterest
let requestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: textOrientation, options: [:])
do {
try requestHandler.perform([request])
} catch {
print(error)
}
}
}
The region of interest works as expected on iPad in portrait mode, but on iPod, and on iPad in landscape mode, the code above does not scan barcodes near the top and bottom edges even when the barcode is completely visible in the camera preview.
Reference links:
Vision framework barcode detection region of interest not working
https://developer.apple.com/documentation/vision/reading_phone_numbers_in_real_time
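One thing worth trying, rather than deriving the ROI from screen ratios by hand, is to let the preview layer do the conversion: AVCaptureVideoPreviewLayer.metadataOutputRectConverted(fromLayerRect:) maps a rect in layer coordinates into normalized buffer coordinates, accounting for the layer's videoGravity (which matters in landscape, where the wide buffer is cropped differently). A sketch, assuming a cutout rect expressed in previewView's layer coordinates:
func regionOfInterest(forLayerRect cutout: CGRect) -> CGRect {
    // Normalized rect in the buffer's (top-left origin) coordinate space.
    let roi = previewView.videoPreviewLayer.metadataOutputRectConverted(fromLayerRect: cutout)
    // Vision uses a lower-left origin, so flip vertically. This flip is exact
    // when the orientation passed to VNImageRequestHandler matches the buffer's
    // native orientation; otherwise the uiRotationTransform above still applies.
    return CGRect(x: roi.minX, y: 1 - roi.maxY, width: roi.width, height: roi.height)
}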

How to make surface material double-sided for MDLAsset?

I am trying to create an app that lets me scan a room and then export a 3D file using LiDAR. I am able to do this with the following code (thanks to ARKit – How to export OBJ from iPhone/iPad with LiDAR?); however, the surfaces are all one-sided: when you rotate the object, the back of every surface is transparent. How can I make the surfaces double-sided in my saveButtonTapped method?
import RealityKit
import ARKit
import MetalKit
import ModelIO
class ViewController: UIViewController {
@IBOutlet var arView: ARView!
var saveButton: UIButton!
let rect = CGRect(x: 50, y: 50, width: 100, height: 50)
override func viewDidLoad() {
super.viewDidLoad()
let tui = UIControl.Event.touchUpInside
saveButton = UIButton(frame: rect)
saveButton.setTitle("Save", for: [])
saveButton.addTarget(self, action: #selector(saveButtonTapped), for: tui)
self.view.addSubview(saveButton)
}
@objc func saveButtonTapped(sender: UIButton) {
print("Saving is executing...")
guard let frame = arView.session.currentFrame
else { fatalError("Can't get ARFrame") }
guard let device = MTLCreateSystemDefaultDevice()
else { fatalError("Can't create MTLDevice") }
let allocator = MTKMeshBufferAllocator(device: device)
let asset = MDLAsset(bufferAllocator: allocator)
let meshAnchors = frame.anchors.compactMap { $0 as? ARMeshAnchor }
for ma in meshAnchors {
let geometry = ma.geometry
let vertices = geometry.vertices
let faces = geometry.faces
let vertexPointer = vertices.buffer.contents()
let facePointer = faces.buffer.contents()
for vtxIndex in 0 ..< vertices.count {
let vertex = geometry.vertex(at: UInt32(vtxIndex))
var vertexLocalTransform = matrix_identity_float4x4
vertexLocalTransform.columns.3 = SIMD4<Float>(x: vertex.0,
y: vertex.1,
z: vertex.2,
w: 1.0)
let vertexWorldTransform = (ma.transform * vertexLocalTransform).position
let vertexOffset = vertices.offset + vertices.stride * vtxIndex
let componentStride = vertices.stride / 3
vertexPointer.storeBytes(of: vertexWorldTransform.x,
toByteOffset: vertexOffset,
as: Float.self)
vertexPointer.storeBytes(of: vertexWorldTransform.y,
toByteOffset: vertexOffset + componentStride,
as: Float.self)
vertexPointer.storeBytes(of: vertexWorldTransform.z,
toByteOffset: vertexOffset + (2 * componentStride),
as: Float.self)
}
let byteCountVertices = vertices.count * vertices.stride
let byteCountFaces = faces.count * faces.indexCountPerPrimitive * faces.bytesPerIndex
let vertexBuffer = allocator.newBuffer(with: Data(bytesNoCopy: vertexPointer,
count: byteCountVertices,
deallocator: .none), type: .vertex)
let indexBuffer = allocator.newBuffer(with: Data(bytesNoCopy: facePointer,
count: byteCountFaces,
deallocator: .none), type: .index)
let indexCount = faces.count * faces.indexCountPerPrimitive
let material = MDLMaterial(name: "material",
scatteringFunction: MDLPhysicallyPlausibleScatteringFunction())
let submesh = MDLSubmesh(indexBuffer: indexBuffer,
indexCount: indexCount,
indexType: .uInt32,
geometryType: .triangles,
material: material)
let vertexFormat = MTKModelIOVertexFormatFromMetal(vertices.format)
let vertexDescriptor = MDLVertexDescriptor()
vertexDescriptor.attributes[0] = MDLVertexAttribute(name: MDLVertexAttributePosition,
format: vertexFormat,
offset: 0,
bufferIndex: 0)
vertexDescriptor.layouts[0] = MDLVertexBufferLayout(stride: ma.geometry.vertices.stride)
let mesh = MDLMesh(vertexBuffer: vertexBuffer,
vertexCount: ma.geometry.vertices.count,
descriptor: vertexDescriptor,
submeshes: [submesh])
asset.add(mesh)
}
let filePath = FileManager.default.urls(for: .documentDirectory,
in: .userDomainMask).first!
let usd: URL = filePath.appendingPathComponent("model.usd")
if MDLAsset.canExportFileExtension("usd") {
do {
try asset.export(to: usd)
let controller = UIActivityViewController(activityItems: [usd],
applicationActivities: nil)
controller.popoverPresentationController?.sourceView = sender
self.present(controller, animated: true, completion: nil)
} catch let error {
fatalError(error.localizedDescription)
}
} else {
fatalError("Can't export USD")
}
}
}
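Model I/O's MDLMaterial has no double-sided switch of its own, so one workaround is to emit the back faces explicitly: add a second submesh that reuses the same vertices but reverses each triangle's winding order. A sketch under that assumption (reversedIndexBuffer is a hypothetical helper, written against the UInt32 indices the code above already assumes):
// Build an index buffer with each triangle's winding reversed, so the
// mesh is also visible from behind once added as a second submesh.
func reversedIndexBuffer(from facePointer: UnsafeMutableRawPointer,
                         indexCount: Int,
                         allocator: MTKMeshBufferAllocator) -> MDLMeshBuffer {
    let src = facePointer.bindMemory(to: UInt32.self, capacity: indexCount)
    var flipped = [UInt32](repeating: 0, count: indexCount)
    for tri in stride(from: 0, to: indexCount, by: 3) {
        flipped[tri] = src[tri]
        flipped[tri + 1] = src[tri + 2] // swapping two indices flips the winding
        flipped[tri + 2] = src[tri + 1]
    }
    let data = flipped.withUnsafeBufferPointer { Data(buffer: $0) }
    return allocator.newBuffer(with: data, type: .index)
}
Inside the mesh-anchor loop, next to the existing submesh, this could be used as:
let backIndexBuffer = reversedIndexBuffer(from: facePointer,
                                          indexCount: indexCount,
                                          allocator: allocator)
let backSubmesh = MDLSubmesh(indexBuffer: backIndexBuffer,
                             indexCount: indexCount,
                             indexType: .uInt32,
                             geometryType: .triangles,
                             material: material)
// ...then pass [submesh, backSubmesh] to the MDLMesh initializer.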

MTKView Transparency

I can't make my MTKView clear its background. I've set the view's and its layer's isOpaque to false, set the background color to clear, and tried multiple solutions found on Google/Stack Overflow (most are in the code below, such as the loadAction and clearColor of the color attachment), but nothing works.
All the background color settings seem to be ignored; setting the loadAction and clearColor of the MTLRenderPassColorAttachmentDescriptor does nothing.
I'd like to have my regular UIViews drawn under the MTKView. What am I missing?
// initialization
let metal = MTKView(frame: self.view.bounds)
metal.device = MTLCreateSystemDefaultDevice()
self.renderer = try! MetalRenderer(mtkView: metal)
metal.delegate = self.renderer
self.view.addSubview(metal);
import Foundation
import MetalKit
import simd
public enum MetalError: Error {
case mtkViewError
case renderError
}
internal class MetalRenderer: NSObject, MTKViewDelegate {
private let commandQueue: MTLCommandQueue;
private let pipelineState: MTLRenderPipelineState
private var viewportSize: SIMD2<UInt32> = SIMD2(x: 10, y: 10);
private weak var mtkView: MTKView?
init(mtkView: MTKView) throws {
guard let device = mtkView.device else {
print("device not found error")
throw MetalError.mtkViewError
}
self.mtkView = mtkView
// Load all the shader files with a .metal file extension in the project.
guard let defaultLibrary = device.makeDefaultLibrary() else {
print("Could not find library")
throw MetalError.mtkViewError
}
let vertexFunction = defaultLibrary.makeFunction(name: "vertexShader")
let fragmentFunction = defaultLibrary.makeFunction(name: "fragmentShader")
mtkView.layer.isOpaque = false;
mtkView.layer.backgroundColor = UIColor.clear.cgColor
mtkView.isOpaque = false;
mtkView.backgroundColor = .clear
let pipelineStateDescriptor = MTLRenderPipelineDescriptor();
pipelineStateDescriptor.label = "Pipeline";
pipelineStateDescriptor.vertexFunction = vertexFunction;
pipelineStateDescriptor.fragmentFunction = fragmentFunction;
pipelineStateDescriptor.isAlphaToCoverageEnabled = true
pipelineStateDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm;
pipelineStateDescriptor.colorAttachments[0].isBlendingEnabled = true;
pipelineStateDescriptor.colorAttachments[0].destinationRGBBlendFactor = .oneMinusSourceAlpha;
pipelineStateDescriptor.colorAttachments[0].destinationAlphaBlendFactor = .oneMinusSourceAlpha;
pipelineState = try! device.makeRenderPipelineState(descriptor: pipelineStateDescriptor);
guard let queue = device.makeCommandQueue() else {
print("make command queue error")
throw MetalError.mtkViewError
}
commandQueue = queue
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
viewportSize.x = UInt32(size.width)
viewportSize.y = UInt32(size.height)
}
func draw(in view: MTKView) {
let vertices: [Vertex] = [
Vertex(position: SIMD3<Float>(x: 250.0, y: -250.0, z: 0)),
Vertex(position: SIMD3<Float>(x: -250.0, y: -250.0, z: 0)),
Vertex(position: SIMD3<Float>(x: 0.0, y: 250.0, z: 0)),
]
guard let commandBuffer = commandQueue.makeCommandBuffer() else {
print("Couldn't create command buffer")
return
}
// Create a new command buffer for each render pass to the current drawable.
commandBuffer.label = "MyCommand";
// Obtain a renderPassDescriptor generated from the view's drawable textures.
guard let renderPassDescriptor = view.currentRenderPassDescriptor else {
print("Couldn't create render pass descriptor")
return
}
// Attachment settings only take effect if applied before the encoder is created.
renderPassDescriptor.colorAttachments[0].loadAction = .clear
renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(1.0, 0.0, 0.0, 0.5)
guard let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else {
print("Couldn't create render encoder")
return
}
renderEncoder.label = "MyRenderEncoder";
// Set the region of the drawable to draw into.
renderEncoder.setViewport(MTLViewport(originX: 0.0, originY: 0.0, width: Double(viewportSize.x), height: Double(viewportSize.y), znear: 0.0, zfar: 1.0))
renderEncoder.setRenderPipelineState(pipelineState)
// Pass in the parameter data.
renderEncoder.setVertexBytes(vertices, length: MemoryLayout<Vertex>.size * vertices.count, index: Int(VertexInputIndexVertices.rawValue))
renderEncoder.setVertexBytes(&viewportSize, length: MemoryLayout<SIMD2<UInt32>>.size, index: Int(VertexInputIndexViewportSize.rawValue))
renderEncoder.drawPrimitives(type: MTLPrimitiveType.triangle, vertexStart: 0, vertexCount: 3)
renderEncoder.endEncoding()
// Schedule a present once the framebuffer is complete using the current drawable.
guard let drawable = view.currentDrawable else {
print("Couldn't get current drawable")
return
}
commandBuffer.present(drawable)
// Finalize rendering here & push the command buffer to the GPU.
commandBuffer.commit()
}
}
Thanks to Frank, the answer was simply to set the clearColor property of the view itself, which I had missed. I also removed most of the adjustments in the MTLRenderPipelineDescriptor, whose code is now:
let pipelineStateDescriptor = MTLRenderPipelineDescriptor();
pipelineStateDescriptor.label = "Pipeline";
pipelineStateDescriptor.vertexFunction = vertexFunction;
pipelineStateDescriptor.fragmentFunction = fragmentFunction;
pipelineStateDescriptor.colorAttachments[0].pixelFormat =
mtkView.colorPixelFormat;
Also, no changes were necessary to the MTLRenderPassDescriptor obtained from currentRenderPassDescriptor.
EDIT: Be sure to set the isOpaque property of the MTKView to false too.
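Concretely, the working combination at the initialization site looks roughly like this (a sketch against the metal view created above):
let metal = MTKView(frame: self.view.bounds)
metal.device = MTLCreateSystemDefaultDevice()
metal.isOpaque = false
metal.backgroundColor = .clear
// clearColor is what the view feeds into its render pass descriptor;
// an alpha of 0 is what actually makes the drawable transparent.
metal.clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 0)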

My animated SKSpriteNode's atlas won't change during touchesBegan

I created a sprite with an animated texture atlas, and I want that animation to change based on the direction the sprite is heading. I try to do so with self.player!.texture = firstFrameTexture (the first frame of the new atlas).
Here is where I build the player (it is called inside didMove(to:), yet the animation starts without the player having to move?):
func buildPlayer() {
let playerAnimatedAtlas = SKTextureAtlas(named: "animation")
var walkFrames: [SKTexture] = []
let numImages = playerAnimatedAtlas.textureNames.count
for i in 1...numImages {
let playerTextureName = "player\(i)"
walkFrames.append(playerAnimatedAtlas.textureNamed(playerTextureName))
}
walkingPlayer = walkFrames
let firstFrameTexture = walkingPlayer[0]
player! = SKSpriteNode(texture: firstFrameTexture)
player!.position = CGPoint(x: frame.midX, y: frame.midY)
addChild(player!)
}
Here is where I animate the player
func animatePlayer() {
player!.run(SKAction.repeatForever(
SKAction.animate(with: walkingPlayer,
timePerFrame: 0.5,
resize: false,
restore: true)),
withKey:"walkingInPlacePlayer")
}
Here is where I try to change the animation. Everything runs and the if statements are executed correctly; just nothing visibly changes.
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
super.touchesBegan(touches, with: event)
if let location = touches.first?.location(in: self) {
let horizontalAction = SKAction.move(to: location, duration: 1.0)
horizontalAction.timingMode = SKActionTimingMode.easeOut
player?.run(horizontalAction)
let playerAnimatedAtlas = SKTextureAtlas(named: "animation")
let lplayerAnimatedAtlas = SKTextureAtlas(named: "animationleft")
var walkFrames: [SKTexture] = []
var lwalkFrames: [SKTexture] = []
let numImages = playerAnimatedAtlas.textureNames.count
for i in 1...numImages {
let playerTextureName = "player\(i)"
let playerLeftTextureName = "lplayer\(i)"
walkFrames.append(playerAnimatedAtlas.textureNamed(playerTextureName))
lwalkFrames.append(lplayerAnimatedAtlas.textureNamed(playerLeftTextureName))
}
walkingPlayer = walkFrames
lwalkingPlayer = lwalkFrames
let firstFrameTexture = walkingPlayer[0]
let leftFrameTexture = lwalkingPlayer[0]
if location.x > player!.position.x {
self.player!.texture = firstFrameTexture
print("rightsuccess")
} else if location.x < player!.position.x {
self.player!.texture = leftFrameTexture
print("leftsuccess")
}
}
}
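A likely reason nothing changes is that the repeatForever animation started in animatePlayer() is still running: it overwrites player.texture every frame, and restore: true puts the original texture back when a cycle ends. One fix is to remove the old action and run the new frame set instead of assigning the texture directly. A sketch, where switchWalkAnimation is a hypothetical helper and walkingPlayer / lwalkingPlayer are the frame arrays populated above:
// Hypothetical helper: stop the current walk cycle and start a new one.
func switchWalkAnimation(to frames: [SKTexture]) {
    player?.removeAction(forKey: "walkingInPlacePlayer")
    player?.run(SKAction.repeatForever(
        SKAction.animate(with: frames, timePerFrame: 0.5, resize: false, restore: true)),
        withKey: "walkingInPlacePlayer")
}
// In touchesBegan, instead of assigning player!.texture directly:
// if location.x > player!.position.x { switchWalkAnimation(to: walkingPlayer) }
// else if location.x < player!.position.x { switchWalkAnimation(to: lwalkingPlayer) }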

iOS Swift - Group separated sprites with synchronized animation

I am trying to make a synchronized animation (a large video decomposed, frame by frame, into separate smaller jigsaw pieces). This game is a video puzzle. Here is the code I use, shown for three pieces by way of example:
func Anim_Puzzle13 (Node13 : SKPuzzle) {
let puzzle13 = SKAction.animateWithTextures(sheet_puzzle13.Puzzle13_(), timePerFrame: 0.066)
NPuzzle13 = Node13
NPuzzle13.runAction(SKAction.repeatActionForever(puzzle13))
NPuzzle13.position = CGPoint(x: 500, y: 400)
NPuzzle13.zPosition = 1
}
func Anim_Puzzle19 (Node19 : SKPuzzle) {
let puzzle19 = SKAction.animateWithTextures(sheet_puzzle19.Puzzle19_(), timePerFrame: 0.066)
NPuzzle19 = Node19
NPuzzle19.runAction(SKAction.repeatActionForever(puzzle19))
NPuzzle19.position = CGPoint(x: 600, y: 500)
NPuzzle19.zPosition = 1
}
func Anim_Puzzle30 (Node30 : SKPuzzle) {
let puzzle30 = SKAction.animateWithTextures(sheet_puzzle30.Puzzle30_(), timePerFrame: 0.066)
NPuzzle30 = Node30
NPuzzle30.runAction(SKAction.repeatActionForever(puzzle30))
NPuzzle30.position = CGPoint(x: 700, y: 600)
NPuzzle30.zPosition = 1
}
It works well, but the animations do not stay synchronized with one another, so the video loses its integrity. I searched for a long time for a way to synchronize the animations and see two possibilities: the first is to create a single SKNode() containing all the jigsaw pieces, but I want to be able to move each piece independently and have had no success getting a synchronized animation this way.
The other way seems to be to create a group with all the animations together, but this doesn't work and causes the application to stop.
Here is all the code I use:
import SpriteKit
import UIKit
import AVFoundation
import AVKit
import CoreFoundation
private let kpuzzleNodeName = "puzzle"
private let kdancing = "dancing"
class SKPuzzle: SKSpriteNode {
var name2:String = "";
}
class GameScene: SKScene {
var background = SKVideoNode(videoFileNamed: "Video_Socle.mov")
var selectedNode = SKPuzzle()
var player:AVPlayer?
var videoNode:SKVideoNode?
var NPuzzle13 = SKPuzzle()
var NPuzzle19 = SKPuzzle()
var NPuzzle30 = SKPuzzle()
var NPuzzle11 = SKPuzzle()
var NPuzzle29 = SKPuzzle()
var NPuzzle35 = SKPuzzle()
var puzzle13 = SKAction()
var puzzle19 = SKAction()
var puzzle30 = SKAction()
var puzzle11 = SKAction()
var puzzle29 = SKAction()
var puzzle35 = SKAction()
let sheet_puzzle13 = Puzzle13()
let sheet_puzzle19 = Puzzle19()
let sheet_puzzle30 = Puzzle30()
let sheet_puzzle11 = Puzzle11()
let sheet_puzzle29 = Puzzle29()
let sheet_puzzle35 = Puzzle35()
override init(size: CGSize) {
super.init(size: size)
// 1
self.background.name = kdancing
self.background.anchorPoint = CGPointZero
background.zPosition = 0
self.addChild(background)
// 2
let sheet = Statiques()
let sprite_dancing1 = SKSpriteNode(texture: sheet.Dancing1())
let sprite_dancing2 = SKSpriteNode(texture: sheet.Dancing2())
sprite_dancing1.name = kdancing
sprite_dancing2.name = kdancing
sprite_dancing1.position = CGPoint(x: 837, y: 752)
sprite_dancing1.zPosition = 2
sprite_dancing2.position = CGPoint(x: 1241, y: 752)
sprite_dancing2.zPosition = 2
background.addChild(sprite_dancing1)
background.addChild(sprite_dancing2)
let imageNames = [sheet.Puzzle13() , sheet.Puzzle19(), sheet.Puzzle30(), sheet.Puzzle11(), sheet.Puzzle29(), sheet.Puzzle35() ]
for i in 0..<imageNames.count {
let imageName = imageNames[i]
let sprite = SKPuzzle(texture: imageName)
sprite.name = kpuzzleNodeName
sprite.name2 = "\(i)"
let offsetFraction = (CGFloat(i) + 1.0)/(CGFloat(imageNames.count) + 1.0)
sprite.position = CGPoint(x: size.width * offsetFraction, y: size.height / 2)
sprite.zPosition = 3
background.addChild(sprite)
}
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func touchesBegan(touches: Set<UITouch>, withEvent event: UIEvent?) {
for touch: AnyObject in touches {
let positionInScene = touch.locationInNode(self)
selectNodeForTouch(positionInScene)
}
}
override func didMoveToView(view: SKView) {
let urlStr = NSBundle.mainBundle().pathForResource("Video_Socle", ofType: "mov")
let url = NSURL(fileURLWithPath: urlStr!)
player = AVPlayer(URL: url)
NSNotificationCenter.defaultCenter().addObserverForName(AVPlayerItemDidPlayToEndTimeNotification, object: player!.currentItem, queue: nil)
{ notification in
let t1 = CMTimeMake(5, 100);
self.player!.seekToTime(t1)
self.player!.play()
}
videoNode = SKVideoNode(AVPlayer: player!)
videoNode!.position = CGPointMake(frame.size.width/2, frame.size.height/2)
videoNode!.size = CGSize(width: 2048, height: 1536)
videoNode!.zPosition = 0
background.addChild(videoNode!)
videoNode!.play()
let gestureRecognizer = UIPanGestureRecognizer(target: self, action: #selector(GameScene.handlePanFrom(_:)))
self.view!.addGestureRecognizer(gestureRecognizer)
}
func handlePanFrom(recognizer : UIPanGestureRecognizer) {
if recognizer.state == .Began {
var touchLocation = recognizer.locationInView(recognizer.view)
touchLocation = self.convertPointFromView(touchLocation)
self.selectNodeForTouch(touchLocation)
} else if recognizer.state == .Changed {
var translation = recognizer.translationInView(recognizer.view!)
translation = CGPoint(x: translation.x, y: -translation.y)
self.panForTranslation(translation)
recognizer.setTranslation(CGPointZero, inView: recognizer.view)
} else if recognizer.state == .Ended {
}
}
func degToRad(degree: Double) -> CGFloat {
return CGFloat(degree / 180.0 * M_PI)
}
func selectNodeForTouch(touchLocation : CGPoint) {
// 1
let touchedNode = self.nodeAtPoint(touchLocation)
if touchedNode is SKPuzzle {
// 2
if !selectedNode.isEqual(touchedNode) {
selectedNode.runAction(SKAction.rotateToAngle(0.0, duration: 0.1))
selectedNode = touchedNode as! SKPuzzle
// 3
if touchedNode.name! == kpuzzleNodeName {
let sequence = SKAction.sequence([SKAction.rotateByAngle(degToRad(-4.0), duration: 0.1),
SKAction.rotateByAngle(0.0, duration: 0.1),
SKAction.rotateByAngle(degToRad(4.0), duration: 0.1)])
selectedNode.runAction(SKAction.repeatActionForever(sequence))
}
}
}
}
func panForTranslation(translation : CGPoint) {
let position = selectedNode.position
if selectedNode.name! == kpuzzleNodeName {
selectedNode.position = CGPoint(x: position.x + translation.x * 2, y: position.y + translation.y * 2)
print (selectedNode.name)
print (selectedNode.name2)
if selectedNode.name2 == "0" {
Anim_Puzzle13(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "1" {
Anim_Puzzle19(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "2" {
Anim_Puzzle30(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "3" {
Anim_Puzzle11(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "4" {
Anim_Puzzle29(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "5" {
Anim_Puzzle35(selectedNode)
}
}
}
func Anim_Puzzle13 (Node13 : SKPuzzle) {
let puzzle13 = SKAction.animateWithTextures(sheet_puzzle13.Puzzle13_(), timePerFrame: 0.066)
NPuzzle13 = Node13
NPuzzle13.runAction(SKAction.repeatActionForever(puzzle13))
NPuzzle13.position = CGPoint(x: 500, y: 400)
NPuzzle13.zPosition = 1
}
func Anim_Puzzle19 (Node19 : SKPuzzle) {
let puzzle19 = SKAction.animateWithTextures(sheet_puzzle19.Puzzle19_(), timePerFrame: 0.066)
NPuzzle19 = Node19
NPuzzle19.runAction(SKAction.repeatActionForever(puzzle19))
NPuzzle19.position = CGPoint(x: 600, y: 500)
NPuzzle19.zPosition = 1
}
func Anim_Puzzle30 (Node30 : SKPuzzle) {
let puzzle30 = SKAction.animateWithTextures(sheet_puzzle30.Puzzle30_(), timePerFrame: 0.066)
NPuzzle30 = Node30
NPuzzle30.runAction(SKAction.repeatActionForever(puzzle30))
NPuzzle30.position = CGPoint(x: 700, y: 600)
NPuzzle30.zPosition = 1
}
func Anim_Puzzle11 (Node11 : SKPuzzle) {
let puzzle11 = SKAction.animateWithTextures(sheet_puzzle11.Puzzle11_(), timePerFrame: 0.066)
NPuzzle11 = Node11
NPuzzle11.runAction(SKAction.repeatActionForever(puzzle11))
NPuzzle11.position = CGPoint(x: 800, y: 700)
NPuzzle11.zPosition = 1
}
func Anim_Puzzle29 (Node29 : SKPuzzle) {
let puzzle29 = SKAction.animateWithTextures(sheet_puzzle29.Puzzle29_(), timePerFrame: 0.066)
NPuzzle29 = Node29
NPuzzle29.runAction(SKAction.repeatActionForever(puzzle29))
NPuzzle29.position = CGPoint(x: 900, y: 800)
NPuzzle29.zPosition = 1
}
func Anim_Puzzle35 (Node35 : SKPuzzle) {
let puzzle35 = SKAction.animateWithTextures(sheet_puzzle35.Puzzle35_(), timePerFrame: 0.066)
NPuzzle35 = Node35
NPuzzle35.runAction(SKAction.repeatActionForever(puzzle35))
NPuzzle35.position = CGPoint(x: 1000, y: 900)
NPuzzle35.zPosition = 1
}
}
I'm not sure whether it's possible to synchronize animations like this, with a separate SKAction for each piece, because each piece must remain individually selectable.
UPDATE: I've tried the action-group approach, but I get the same animation playing on every sprite instead of a different, synchronized animation for each sprite (6 different animations synchronized, 6 different sprites):
let sheet13 = Puzzle13()
let sheet19 = Puzzle19()
let sheet30 = Puzzle30()
let sheet11 = Puzzle11()
let sheet29 = Puzzle29()
let sheet35 = Puzzle35()
let imageAnims = [sheet13.Puzzle13_0000() , sheet19.Puzzle19_0000(), sheet30.Puzzle30_0000(), sheet11.Puzzle11_0000(), sheet29.Puzzle29_0000(), sheet35.Puzzle35_0000() ]
let puzzle13 = SKAction.animateWithTextures(sheet13.Puzzle13_(), timePerFrame: 0.066)
let puzzle19 = SKAction.animateWithTextures(sheet19.Puzzle19_(), timePerFrame: 0.066)
let puzzle30 = SKAction.animateWithTextures(sheet30.Puzzle30_(), timePerFrame: 0.066)
let puzzle11 = SKAction.animateWithTextures(sheet11.Puzzle11_(), timePerFrame: 0.066)
let puzzle29 = SKAction.animateWithTextures(sheet29.Puzzle29_(), timePerFrame: 0.066)
let puzzle35 = SKAction.animateWithTextures(sheet35.Puzzle35_(), timePerFrame: 0.066)
let group = SKAction.group([puzzle13,puzzle19,puzzle30,puzzle11,puzzle29,puzzle35])
for i in 0..<imageAnims.count {
let imageAnim = imageAnims[i]
let spriteAnim = SKPuzzle(texture: imageAnim)
spriteAnim.name = kanimNodeName
spriteAnim.name2 = "\(i)"
let offsetFraction = (CGFloat(i) + 1.0)/(CGFloat(imageAnims.count) + 1.0)
spriteAnim.position = CGPoint(x: ((size.width)*2) * offsetFraction, y: size.height * 1.5)
spriteAnim.zPosition = 3
spriteAnim.runAction(SKAction.repeatActionForever(group))
background.addChild(spriteAnim)
}
First of all, I want to list two different methods of creating your SKAction.
Starting with parallel actions by using SKAction.group:
let sprite = SKSpriteNode(imageNamed:"Spaceship")
let scale = SKAction.scaleTo(0.1, duration: 0.5)
let fade = SKAction.fadeOutWithDuration(0.5)
let group = SKAction.group([scale, fade])
sprite.runAction(group)
Another useful technique is a completion handler, so you can know when an SKAction has finished:
extension SKNode
{
func runAction( action: SKAction!, withKey: String!, optionalCompletion: dispatch_block_t? )
{
if let completion = optionalCompletion
{
let completionAction = SKAction.runBlock( completion )
let compositeAction = SKAction.sequence([ action, completionAction ])
runAction( compositeAction, withKey: withKey )
}
else
{
runAction( action, withKey: withKey )
}
}
}
Usage:
node.runAction(move,withKey:"swipeMove",optionalCompletion: {
// here the action is finished, do whatever you want
})
After that, about your project: I've seen many node.runAction calls; you can also adopt this strategy to synchronize your actions:
var myAction30 :SKAction!
var myAction31 :SKAction!
self.myAction30 = SKAction.repeatActionForever(puzzle30)
self.myAction31 = SKAction.repeatActionForever(puzzle31)
let group = SKAction.group([self.myAction30, self.myAction31])
self.runAction(group)
UPDATE: I've seen your update. When you speak about "synchronize", you probably don't mean actions "running in parallel".
So, if you want to run one action after another, there is also:
self.myAction30 = SKAction.repeatActionForever(puzzle30)
self.myAction31 = SKAction.repeatActionForever(puzzle31)
let sequence = SKAction.sequence([self.myAction30, self.myAction31])
self.runAction(sequence)
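Another option for keeping different animations on different nodes in lockstep is to start them all from one group run on a common parent with runAction(_:onChildWithName:); because the group starts every child action on the same frame, the animations begin in sync. A sketch in the question's Swift 2 syntax, assuming each puzzle node has been given a unique name such as "puzzle13":
let syncGroup = SKAction.group([
    SKAction.runAction(SKAction.repeatActionForever(puzzle13), onChildWithName: "puzzle13"),
    SKAction.runAction(SKAction.repeatActionForever(puzzle19), onChildWithName: "puzzle19"),
    SKAction.runAction(SKAction.repeatActionForever(puzzle30), onChildWithName: "puzzle30")
])
// One runAction on the parent starts all three child animations on the same tick.
background.runAction(syncGroup)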
