How to make a surface material double-sided for MDLAsset? (iOS)

I am trying to create an app that allows me to scan a room with LiDAR and then export a 3D file. I am able to do this with the following code (thanks to ARKit – How to export OBJ from iPhone/iPad with LiDAR?); however, it seems that the surfaces are all single-sided (when you rotate the object around, the back side of every surface is invisible). How can I make the surfaces double-sided in my saveButtonTapped method?
import RealityKit
import ARKit
import MetalKit
import ModelIO
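// Note (added sketch): the .position accessor used on the transformed vertex below is not
// part of simd. The answer this code is adapted from defines it in a small float4x4
// extension; a minimal helper along these lines (an assumption) makes that call compile:
extension float4x4 {
    var position: SIMD3<Float> {
        return SIMD3<Float>(columns.3.x, columns.3.y, columns.3.z)
    }
}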
@IBOutlet var arView: ARView!
var saveButton: UIButton!
let rect = CGRect(x: 50, y: 50, width: 100, height: 50)
override func viewDidLoad() {
super.viewDidLoad()
let tui = UIControl.Event.touchUpInside
saveButton = UIButton(frame: rect)
saveButton.setTitle("Save", for: [])
saveButton.addTarget(self, action: #selector(saveButtonTapped), for: tui)
self.view.addSubview(saveButton)
}
@objc func saveButtonTapped(sender: UIButton) {
print("Saving is executing...")
guard let frame = arView.session.currentFrame
else { fatalError("Can't get ARFrame") }
guard let device = MTLCreateSystemDefaultDevice()
else { fatalError("Can't create MTLDevice") }
let allocator = MTKMeshBufferAllocator(device: device)
let asset = MDLAsset(bufferAllocator: allocator)
let meshAnchors = frame.anchors.compactMap { $0 as? ARMeshAnchor }
for ma in meshAnchors {
let geometry = ma.geometry
let vertices = geometry.vertices
let faces = geometry.faces
let vertexPointer = vertices.buffer.contents()
let facePointer = faces.buffer.contents()
for vtxIndex in 0 ..< vertices.count {
let vertex = geometry.vertex(at: UInt32(vtxIndex))
var vertexLocalTransform = matrix_identity_float4x4
vertexLocalTransform.columns.3 = SIMD4<Float>(x: vertex.0,
y: vertex.1,
z: vertex.2,
w: 1.0)
let vertexWorldTransform = (ma.transform * vertexLocalTransform).position
let vertexOffset = vertices.offset + vertices.stride * vtxIndex
let componentStride = vertices.stride / 3
vertexPointer.storeBytes(of: vertexWorldTransform.x,
toByteOffset: vertexOffset,
as: Float.self)
vertexPointer.storeBytes(of: vertexWorldTransform.y,
toByteOffset: vertexOffset + componentStride,
as: Float.self)
vertexPointer.storeBytes(of: vertexWorldTransform.z,
toByteOffset: vertexOffset + (2 * componentStride),
as: Float.self)
}
let byteCountVertices = vertices.count * vertices.stride
let byteCountFaces = faces.count * faces.indexCountPerPrimitive * faces.bytesPerIndex
let vertexBuffer = allocator.newBuffer(with: Data(bytesNoCopy: vertexPointer,
count: byteCountVertices,
deallocator: .none), type: .vertex)
let indexBuffer = allocator.newBuffer(with: Data(bytesNoCopy: facePointer,
count: byteCountFaces,
deallocator: .none), type: .index)
let indexCount = faces.count * faces.indexCountPerPrimitive
let material = MDLMaterial(name: "material",
scatteringFunction: MDLPhysicallyPlausibleScatteringFunction())
let submesh = MDLSubmesh(indexBuffer: indexBuffer,
indexCount: indexCount,
indexType: .uInt32,
geometryType: .triangles,
material: material)
let vertexFormat = MTKModelIOVertexFormatFromMetal(vertices.format)
let vertexDescriptor = MDLVertexDescriptor()
vertexDescriptor.attributes[0] = MDLVertexAttribute(name: MDLVertexAttributePosition,
format: vertexFormat,
offset: 0,
bufferIndex: 0)
vertexDescriptor.layouts[0] = MDLVertexBufferLayout(stride: ma.geometry.vertices.stride)
let mesh = MDLMesh(vertexBuffer: vertexBuffer,
vertexCount: ma.geometry.vertices.count,
descriptor: vertexDescriptor,
submeshes: [submesh])
asset.add(mesh)
}
let filePath = FileManager.default.urls(for: .documentDirectory,
in: .userDomainMask).first!
let usd: URL = filePath.appendingPathComponent("model.usd")
if MDLAsset.canExportFileExtension("usd") {
do {
try asset.export(to: usd)
let controller = UIActivityViewController(activityItems: [usd],
applicationActivities: nil)
controller.popoverPresentationController?.sourceView = sender
self.present(controller, animated: true, completion: nil)
} catch let error {
fatalError(error.localizedDescription)
}
} else {
fatalError("Can't export USD")
}
}
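One common workaround, sketched below rather than a definitive fix: duplicate every triangle with reversed winding order before building the submesh, so each face also has a back face and viewers that cull back faces still show geometry from both sides. The names reuse the variables from the per-anchor loop above; doubledSubmesh would be passed to the MDLMesh initializer in place of submesh.
// Assumes faces, facePointer, allocator and material from the loop above (triangle primitives).
let totalIndexCount = faces.count * faces.indexCountPerPrimitive
let srcIndices = facePointer.bindMemory(to: UInt32.self, capacity: totalIndexCount)
var doubledIndices = [UInt32]()
doubledIndices.reserveCapacity(totalIndexCount * 2)
for f in 0 ..< faces.count {
    let a = srcIndices[f * 3]
    let b = srcIndices[f * 3 + 1]
    let c = srcIndices[f * 3 + 2]
    doubledIndices.append(contentsOf: [a, b, c])   // front face, original winding
    doubledIndices.append(contentsOf: [c, b, a])   // back face, reversed winding
}
let doubledIndexData = doubledIndices.withUnsafeBufferPointer { Data(buffer: $0) }
let doubledIndexBuffer = allocator.newBuffer(with: doubledIndexData, type: .index)
let doubledSubmesh = MDLSubmesh(indexBuffer: doubledIndexBuffer,
                                indexCount: doubledIndices.count,
                                indexType: .uInt32,
                                geometryType: .triangles,
                                material: material)
This roughly doubles the exported geometry; whether a given viewer also needs a double-sided flag in the material depends on the viewer, so treat this as one possible direction rather than the answer.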

Related

MTKView Transparency

I can't make my MTKView clear its background. I've set the view's and its layer's isOpaque to false, set the background color to clear, and tried multiple solutions found on Google/Stack Overflow (most are in the code below, like the loadAction and clearColor of the color attachment), but nothing works.
All the background color settings seem to be ignored. Setting the loadAction and clearColor of the MTLRenderPassColorAttachmentDescriptor does nothing.
I'd like to have my regular UIViews drawn under the MTKView. What am I missing?
// initialization
let metal = MTKView(frame: self.view.bounds)
metal.device = MTLCreateSystemDefaultDevice()
self.renderer = try! MetalRenderer(mtkView: metal)
metal.delegate = self.renderer
self.view.addSubview(metal);
import Foundation
import MetalKit
import simd
public enum MetalError: Error {
case mtkViewError
case renderError
}
internal class MetalRenderer: NSObject, MTKViewDelegate {
private let commandQueue: MTLCommandQueue;
private let pipelineState: MTLRenderPipelineState
private var viewportSize: SIMD2<UInt32> = SIMD2(x: 10, y: 10);
private weak var mtkView: MTKView?
init(mtkView: MTKView) throws {
guard let device = mtkView.device else {
print("device not found error")
throw MetalError.mtkViewError
}
self.mtkView = mtkView
// Load all the shader files with a .metal file extension in the project.
guard let defaultLibrary = device.makeDefaultLibrary() else {
print("Could not find library")
throw MetalError.mtkViewError
}
let vertexFunction = defaultLibrary.makeFunction(name: "vertexShader")
let fragmentFunction = defaultLibrary.makeFunction(name: "fragmentShader")
mtkView.layer.isOpaque = false;
mtkView.layer.backgroundColor = UIColor.clear.cgColor
mtkView.isOpaque = false;
mtkView.backgroundColor = .clear
let pipelineStateDescriptor = MTLRenderPipelineDescriptor();
pipelineStateDescriptor.label = "Pipeline";
pipelineStateDescriptor.vertexFunction = vertexFunction;
pipelineStateDescriptor.fragmentFunction = fragmentFunction;
pipelineStateDescriptor.isAlphaToCoverageEnabled = true
pipelineStateDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm;
pipelineStateDescriptor.colorAttachments[0].isBlendingEnabled = true;
pipelineStateDescriptor.colorAttachments[0].destinationRGBBlendFactor = .oneMinusSourceAlpha;
pipelineStateDescriptor.colorAttachments[0].destinationAlphaBlendFactor = .oneMinusSourceAlpha;
pipelineState = try! device.makeRenderPipelineState(descriptor: pipelineStateDescriptor);
guard let queue = device.makeCommandQueue() else {
print("make command queue error")
throw MetalError.mtkViewError
}
commandQueue = queue
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
viewportSize.x = UInt32(size.width)
viewportSize.y = UInt32(size.height)
}
func draw(in view: MTKView) {
let vertices: [Vertex] = [
Vertex(position: SIMD3<Float>(x: 250.0, y: -250.0, z: 0)),
Vertex(position: SIMD3<Float>(x: -250.0, y: -250.0, z: 0)),
Vertex(position: SIMD3<Float>(x: 0.0, y: 250.0, z: 0)),
]
guard let commandBuffer = commandQueue.makeCommandBuffer() else {
print("Couldn't create command buffer")
return
}
// Create a new command buffer for each render pass to the current drawable.
commandBuffer.label = "MyCommand";
// Obtain a renderPassDescriptor generated from the view's drawable textures.
guard let renderPassDescriptor = view.currentRenderPassDescriptor else {
print("Couldn't create render pass descriptor")
return
}
guard let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else {
print("Couldn't create render encoder")
return
}
renderPassDescriptor.colorAttachments[0].loadAction = .clear
renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(1.0, 0.0, 0.0, 0.5)
renderEncoder.label = "MyRenderEncoder";
// Set the region of the drawable to draw into.
renderEncoder.setViewport(MTLViewport(originX: 0.0, originY: 0.0, width: Double(viewportSize.x), height: Double(viewportSize.y), znear: 0.0, zfar: 1.0))
renderEncoder.setRenderPipelineState(pipelineState)
// Pass in the parameter data.
renderEncoder.setVertexBytes(vertices, length: MemoryLayout<Vertex>.size * vertices.count, index: Int(VertexInputIndexVertices.rawValue))
renderEncoder.setVertexBytes(&viewportSize, length: MemoryLayout<SIMD2<UInt32>>.size, index: Int(VertexInputIndexViewportSize.rawValue))
renderEncoder.drawPrimitives(type: MTLPrimitiveType.triangle, vertexStart: 0, vertexCount: 3)
renderEncoder.endEncoding()
// Schedule a present once the framebuffer is complete using the current drawable.
guard let drawable = view.currentDrawable else {
print("Couldn't get current drawable")
return
}
commandBuffer.present(drawable)
// Finalize rendering here & push the command buffer to the GPU.
commandBuffer.commit()
}
}
Thanks to Frank, the answer was to just set the clearColor property of the view itself, which I missed. I also removed most of the adjustments to the MTLRenderPipelineDescriptor, whose code is now:
let pipelineStateDescriptor = MTLRenderPipelineDescriptor();
pipelineStateDescriptor.label = "Pipeline";
pipelineStateDescriptor.vertexFunction = vertexFunction;
pipelineStateDescriptor.fragmentFunction = fragmentFunction;
pipelineStateDescriptor.colorAttachments[0].pixelFormat =
mtkView.colorPixelFormat;
Also, no changes to the MTLRenderPassDescriptor obtained from currentRenderPassDescriptor were necessary.
EDIT: Also be sure to set the MTKView's isOpaque property to false.
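Putting the accepted fixes together, the view-side configuration amounts to something like this sketch (metal is the view from the initialization snippet above; the clear color values are just an example):
metal.isOpaque = false
metal.backgroundColor = .clear
metal.clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 0.0)   // transparent clear color set on the view itself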

How to convert YUV frames (from OTVideoFrame) to CVPixelBuffer

I need to convert the YUV frames that I get from the OTVideoFrame class to a CVPixelBuffer.
This class provides an array of planes in the video frame, which contains three elements for the Y, U and V planes at indices 0, 1 and 2.
@property (nonatomic, retain) NSPointerArray *planes
and format of the video frame
@property (nonatomic, retain) OTVideoFormat *format
which contains properties such as the frame's width, height, bytesPerRow, etc.
I need to add a filter to the image I receive in the form of an OTVideoFrame. I have already tried these answers:
How to convert from YUV to CIImage for iOS
Create CVPixelBuffer from YUV with IOSurface backed
These two links have solutions in Objective-C, but I want to do it in Swift. One of the answers in the second link is in Swift, but it lacks some information about the YUVFrame struct that the answer refers to.
The format that I receive is NV12.
Here is what I have tried so far, but I don't know how to proceed next:
/**
* Calculate the size of each plane from OTVideoFrame.
*
* @param frame The frame to render.
* @return Tuple containing three elements for the size of each plane.
*/
fileprivate func calculatePlaneSize(forFrame frame: OTVideoFrame)
-> (ySize: Int, uSize: Int, vSize: Int){
guard let frameFormat = frame.format
else {
return (0, 0 ,0)
}
let baseSize = Int(frameFormat.imageWidth * frameFormat.imageHeight) * MemoryLayout<GLubyte>.size
return (baseSize, baseSize / 4, baseSize / 4)
}
/**
* Renders a frame to the video renderer.
*
* @param frame The frame to render.
*/
func renderVideoFrame(_ frame: OTVideoFrame) {
let planeSize = calculatePlaneSize(forFrame: frame)
let yPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.ySize)
let uPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.uSize)
let vPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.vSize)
memcpy(yPlane, frame.planes?.pointer(at: 0), planeSize.ySize)
memcpy(uPlane, frame.planes?.pointer(at: 1), planeSize.uSize)
memcpy(vPlane, frame.planes?.pointer(at: 2), planeSize.vSize)
let yStride = frame.format!.bytesPerRow.object(at: 0) as! Int
// multiply chroma strides by 2 as bytesPerRow represents 2x2 subsample
let uStride = frame.format!.bytesPerRow.object(at: 1) as! Int
let vStride = frame.format!.bytesPerRow.object(at: 2) as! Int
let width = frame.format!.imageWidth
let height = frame.format!.imageHeight
var pixelBuffer: CVPixelBuffer? = nil
var err: CVReturn;
err = CVPixelBufferCreate(kCFAllocatorDefault, Int(width), Int(height), kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, nil, &pixelBuffer)
if (err != 0) {
NSLog("Error at CVPixelBufferCreate %d", err)
fatalError()
}
}
Taking guidance from those two links, I tried to create the pixel buffer, but I got stuck every time at this point because the conversion of the Objective-C code after this is not similar to what we have in Swift 3.
For those who are looking for a fast solution, I did it with Swift and Accelerate's vImage,
using the vImageConvert_420Yp8_Cb8_Cr8ToARGB8888 function (as in the code below).
import Foundation
import Accelerate
import UIKit
import OpenTok
class Accelerater{
var infoYpCbCrToARGB = vImage_YpCbCrToARGB()
init() {
_ = configureYpCbCrToARGBInfo()
}
func configureYpCbCrToARGBInfo() -> vImage_Error {
print("Configuring")
var pixelRange = vImage_YpCbCrPixelRange(Yp_bias: 0,
CbCr_bias: 128,
YpRangeMax: 255,
CbCrRangeMax: 255,
YpMax: 255,
YpMin: 1,
CbCrMax: 255,
CbCrMin: 0)
let error = vImageConvert_YpCbCrToARGB_GenerateConversion(
kvImage_YpCbCrToARGBMatrix_ITU_R_601_4!,
&pixelRange,
&infoYpCbCrToARGB,
kvImage420Yp8_Cb8_Cr8,
kvImageARGB8888,
vImage_Flags(kvImagePrintDiagnosticsToConsole))
print("Configration done \(error)")
return error
}
public func convertFrameVImageYUV(toUIImage frame: OTVideoFrame, flag: Bool) -> UIImage {
var result: UIImage? = nil
let width = frame.format?.imageWidth ?? 0
let height = frame.format?.imageHeight ?? 0
var pixelBuffer: CVPixelBuffer? = nil
_ = CVPixelBufferCreate(kCFAllocatorDefault, Int(width), Int(height), kCVPixelFormatType_32BGRA, nil, &pixelBuffer)
_ = convertFrameVImageYUV(frame, to: pixelBuffer)
var ciImage: CIImage? = nil
if let pixelBuffer = pixelBuffer {
ciImage = CIImage(cvPixelBuffer: pixelBuffer)
}
let temporaryContext = CIContext(options: nil)
var uiImage: CGImage? = nil
if let ciImage = ciImage {
uiImage = temporaryContext.createCGImage(ciImage, from: CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer!), height: CVPixelBufferGetHeight(pixelBuffer!)))
}
if let uiImage = uiImage {
result = UIImage(cgImage: uiImage)
}
CVPixelBufferUnlockBaseAddress(pixelBuffer!, [])
return result!
}
func convertFrameVImageYUV(_ frame: OTVideoFrame, to pixelBufferRef: CVPixelBuffer?) -> vImage_Error{
let start = CFAbsoluteTimeGetCurrent()
if pixelBufferRef == nil {
print("No PixelBuffer refrance found")
return vImage_Error(kvImageInvalidParameter)
}
let width = frame.format?.imageWidth ?? 0
let height = frame.format?.imageHeight ?? 0
let subsampledWidth = frame.format!.imageWidth/2
let subsampledHeight = frame.format!.imageHeight/2
print("subsample height \(subsampledHeight) \(subsampledWidth)")
let planeSize = calculatePlaneSize(forFrame: frame)
print("ysize : \(planeSize.ySize) \(planeSize.uSize) \(planeSize.vSize)")
let yPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.ySize)
let uPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.uSize)
let vPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.vSize)
memcpy(yPlane, frame.planes?.pointer(at: 0), planeSize.ySize)
memcpy(uPlane, frame.planes?.pointer(at: 1), planeSize.uSize)
memcpy(vPlane, frame.planes?.pointer(at: 2), planeSize.vSize)
let yStride = frame.format!.bytesPerRow.object(at: 0) as! Int
// multiply chroma strides by 2 as bytesPerRow represents 2x2 subsample
let uStride = frame.format!.bytesPerRow.object(at: 1) as! Int
let vStride = frame.format!.bytesPerRow.object(at: 2) as! Int
var yPlaneBuffer = vImage_Buffer(data: yPlane, height: vImagePixelCount(height), width: vImagePixelCount(width), rowBytes: yStride)
var uPlaneBuffer = vImage_Buffer(data: uPlane, height: vImagePixelCount(subsampledHeight), width: vImagePixelCount(subsampledWidth), rowBytes: uStride)
var vPlaneBuffer = vImage_Buffer(data: vPlane, height: vImagePixelCount(subsampledHeight), width: vImagePixelCount(subsampledWidth), rowBytes: vStride)
CVPixelBufferLockBaseAddress(pixelBufferRef!, .readOnly)
let pixelBufferData = CVPixelBufferGetBaseAddress(pixelBufferRef!)
let rowBytes = CVPixelBufferGetBytesPerRow(pixelBufferRef!)
var destinationImageBuffer = vImage_Buffer()
destinationImageBuffer.data = pixelBufferData
destinationImageBuffer.height = vImagePixelCount(height)
destinationImageBuffer.width = vImagePixelCount(width)
destinationImageBuffer.rowBytes = rowBytes
var permuteMap: [UInt8] = [3, 2, 1, 0] // BGRA
let convertError = vImageConvert_420Yp8_Cb8_Cr8ToARGB8888(&yPlaneBuffer, &uPlaneBuffer, &vPlaneBuffer, &destinationImageBuffer, &infoYpCbCrToARGB, &permuteMap, 255, vImage_Flags(kvImagePrintDiagnosticsToConsole))
CVPixelBufferUnlockBaseAddress(pixelBufferRef!, [])
yPlane.deallocate()
uPlane.deallocate()
vPlane.deallocate()
let end = CFAbsoluteTimeGetCurrent()
print("Decoding time \((end-start)*1000)")
return convertError
}
fileprivate func calculatePlaneSize(forFrame frame: OTVideoFrame)
-> (ySize: Int, uSize: Int, vSize: Int)
{
guard let frameFormat = frame.format
else {
return (0, 0 ,0)
}
let baseSize = Int(frameFormat.imageWidth * frameFormat.imageHeight) * MemoryLayout<GLubyte>.size
return (baseSize, baseSize / 4, baseSize / 4)
}
}
Performance tested on an iPhone 7: one frame conversion takes less than a millisecond.
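For reference, a minimal usage sketch of the class above (frame is the OTVideoFrame delivered to the renderer callback; the flag parameter is not used by the conversion shown):
let converter = Accelerater()
// In the callback that receives each OTVideoFrame:
let image = converter.convertFrameVImageYUV(toUIImage: frame, flag: false)
// image is a UIImage built from the BGRA pixel buffer produced by the conversion.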
Here's what worked for me (I've taken your function and changed it a bit):
func createPixelBufferWithVideoFrame(_ frame: OTVideoFrame) -> CVPixelBuffer? {
if let fLock = frameLock {
fLock.lock()
let planeSize = calculatePlaneSize(forFrame: frame)
let yPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.ySize)
let uPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.uSize)
let vPlane = UnsafeMutablePointer<GLubyte>.allocate(capacity: planeSize.vSize)
memcpy(yPlane, frame.planes?.pointer(at: 0), planeSize.ySize)
memcpy(uPlane, frame.planes?.pointer(at: 1), planeSize.uSize)
memcpy(vPlane, frame.planes?.pointer(at: 2), planeSize.vSize)
let width = frame.format!.imageWidth
let height = frame.format!.imageHeight
var pixelBuffer: CVPixelBuffer? = nil
var err: CVReturn;
err = CVPixelBufferCreate(kCFAllocatorDefault, Int(width), Int(height), kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, nil, &pixelBuffer)
if (err != 0) {
NSLog("Error at CVPixelBufferCreate %d", err)
return nil
}
if let pixelBuffer = pixelBuffer {
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let yPlaneTo = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0)
memcpy(yPlaneTo, yPlane, planeSize.ySize)
let uvRow: Int = planeSize.uSize*2/Int(width)
let halfWidth: Int = Int(width)/2
if let uPlaneTo = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1) {
let uvPlaneTo = uPlaneTo.bindMemory(to: GLubyte.self, capacity: Int(uvRow*halfWidth*2))
for i in 0..<uvRow {
for j in 0..<halfWidth {
let dataIndex: Int = Int(i) * Int(halfWidth) + Int(j)
let uIndex: Int = (i * Int(width)) + Int(j) * 2
let vIndex: Int = uIndex + 1
uvPlaneTo[uIndex] = uPlane[dataIndex]
uvPlaneTo[vIndex] = vPlane[dataIndex]
}
}
}
}
fLock.unlock()
return pixelBuffer
}
return nil
}
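Since the original goal was to apply a filter to the incoming frame, a hypothetical follow-up sketch (the filter name and parameter are only an example, and CoreImage is assumed to be imported) wraps the returned pixel buffer in a CIImage:
if let pixelBuffer = createPixelBufferWithVideoFrame(frame) {
    let filtered = CIImage(cvPixelBuffer: pixelBuffer)
        .applyingFilter("CISepiaTone", parameters: [kCIInputIntensityKey: 0.8])
    // Render filtered with a CIContext, or convert it back to a pixel buffer as needed.
}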

How to solve Command failed due to signal: Segmentation fault: 11

I was searching for this issue here, and found out that it is not a common one.
Everyone had a different approach to solving it. I'm using the Firebase and Gifu frameworks. For the latter, I copied all of its files into my project, but that didn't help either.
Here is my source code:
import FirebaseDatabase
import FirebaseAuth
import Firebase
import UIKit
import Gifu
class ViewController: UIViewController {
@IBOutlet weak var userImage: AnimatableImageView!
var displayedUserId = ""
var AcceptedOrRejected = ""
override func viewDidLoad() {
super.viewDidLoad()
let urlArray = ["http://i.imgur.com/VAWlQ0S.gif", "http://i.imgur.com/JDzGqvE.gif", "http://67.media.tumblr.com/4cd2a04b60bb867bb4746d682aa60020/tumblr_mjs2dvWX6x1rvn6njo1_400.gif", "https://media.giphy.com/media/TlK63ELk5OPDzpb6Tao/giphy.gif", "http://i3.photobucket.com/albums/y90/spicestas/GeriHalliwell-Calling-new1.gif", "http://media.tumblr.com/tumblr_lnb9aozmM71qbxrlp.gif"]
var counter = 1
for url in urlArray {
let nsUrl = NSURL(string: url)
let girls = ProfileClass()
girls.profilePhotoUrl = url
girls.profileGender = "female"
girls.profileName = "girlsname\(counter)"
girls.profileSurname = "girlsurname\(counter)"
girls.interest = "men"
girls.uid = "\(randomStringWithLength(45))"
counter++
girls.SaveUser()
}
//----setting variables
let label = UILabel(frame: CGRectMake(self.view.bounds.width / 2 - 100, self.view.bounds.height / 2 - 50, 300, 100))
//label.text = "Drag me!"
//label.textAlignment = NSTextAlignment.Center
self.view.addSubview(label)
let gesture = UIPanGestureRecognizer(target: self, action: Selector("wasDragged:"))
userImage.addGestureRecognizer(gesture)
userImage.userInteractionEnabled = true
//----getting access to database
let thisUserRef = URL_BASE.childByAppendingPath("profile")
thisUserRef.queryOrderedByChild("Interest").queryEqualToValue("men").observeEventType(.Value, withBlock: {
snapshot in
for child in snapshot.children{
self.displayedUserId = (child.value["uid"] as? String)!
let imageURL = child.value["photo"] as? String
let imURL = NSURL(string: imageURL!)
//print(imageURL)
if self.AcceptedOrRejected != "" {
let AcceptedArray = child.value[AcceptedOrRejected] as? Array
AcceptedArray.append(displayedUserId)
}
if let picData = NSData(contentsOfURL: imURL!) {
self.userImage.animateWithImageData(picData)
//self.userImage.image = UIImage(data: picData)
}
}
})
}
//-----Dragging function-----------
func wasDragged(gesture: UIPanGestureRecognizer) {
//set traslations
let translation = gesture.translationInView(self.view)
let label = gesture.view
//set center position
label!.center = CGPoint(x: self.view.bounds.width / 2 + translation.x, y: self.view.bounds.height / 2 - translation.y )
let xfromCenter = (label?.center.x)! - self.view.bounds.width / 2
let scale = min(100 / abs(xfromCenter),1)
var rotation = CGAffineTransformMakeRotation(xfromCenter / 200)
var strech = CGAffineTransformScale(rotation, scale, scale)
label?.transform = strech
if gesture.state == UIGestureRecognizerState.Ended {
if label?.center.x < 100 {
self.AcceptedOrRejected = "Accepted"
} else if label?.center.x > self.view.bounds.width / 100 {
self.AcceptedOrRejected = "Rejected"
}
rotation = CGAffineTransformMakeRotation(0)
strech = CGAffineTransformScale(rotation, 1, 1)
label?.transform = strech
label?.center = CGPoint(x: self.view.bounds.width / 2 , y: self.view.bounds.height / 2 )
}
}
//---Function for generating randomn userid
func randomStringWithLength (len : Int) -> NSString {
let letters : NSString = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
let randomString : NSMutableString = NSMutableString(capacity: len)
for (var i=0; i < len; i += 1){
let length = UInt32 (letters.length)
let rand = arc4random_uniform(length)
randomString.appendFormat("%C", letters.characterAtIndex(Int(rand)))
}
return randomString
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
if segue.identifier == "LogOutSegue" {
try! FIRAuth.auth()!.signOut()
}
}
}
And I get this error
1. While emitting SIL for 'viewDidLoad' at /Users/zkid18/Desktop/wrk/Bloom/Bloom/ViewController.swift:23:14
2. While silgen closureexpr SIL function @_TFFC5Bloom14ViewController11viewDidLoadFT_T_U_FCSo15FIRDataSnapshotT_ for expression at [/Users/zkid18/Desktop/wrk/Bloom/Bloom/ViewController.swift:68:114 - line:107:9] RangeText="{
snapshot in
for child in snapshot.children{
self.displayedUserId = (child.value["uid"] as? String)!
let imageURL = child.value["photo"] as? String
let imURL = NSURL(string: imageURL!)
//print(imageURL)
if self.AcceptedOrRejected != "" {
let AcceptedArray = child.value[AcceptedOrRejected] as? Array
AcceptedArray.append(displayedUserId)
}
if let picData = NSData(contentsOfURL: imURL!) {
self.userImage.animateWithImageData(picData)
//self.userImage.image = UIImage(data: picData)
}
}
}"
I don't really know what to do with that.
I just attempted a conversion to Swift 3 to get a jump on fixing issues in my code. I had a similar error after integrating Firebase throughout my project. I found that removing the pod and framework from the app and commenting out all Firebase code fixed this compile issue.

iOS Swift - Group separated sprites with synchronized animation

I am trying to make a synchronized animation (a large video decomposed frame by frame into separate, smaller jigsaw puzzle pieces). This game is a video puzzle. Here is the code I use, shown for three parts by way of example:
func Anim_Puzzle13 (Node13 : SKPuzzle) {
let puzzle13 = SKAction.animateWithTextures(sheet_puzzle13.Puzzle13_(), timePerFrame: 0.066)
NPuzzle13 = Node13
NPuzzle13.runAction(SKAction.repeatActionForever(puzzle13))
NPuzzle13.position = CGPoint(x: 500, y: 400)
NPuzzle13.zPosition = 1
}
func Anim_Puzzle19 (Node19 : SKPuzzle) {
let puzzle19 = SKAction.animateWithTextures(sheet_puzzle19.Puzzle19_(), timePerFrame: 0.066)
NPuzzle19 = Node19
NPuzzle19.runAction(SKAction.repeatActionForever(puzzle19))
NPuzzle19.position = CGPoint(x: 600, y: 500)
NPuzzle19.zPosition = 1
}
func Anim_Puzzle30 (Node30 : SKPuzzle) {
let puzzle30 = SKAction.animateWithTextures(sheet_puzzle30.Puzzle30_(), timePerFrame: 0.066)
NPuzzle30 = Node30
NPuzzle30.runAction(SKAction.repeatActionForever(puzzle30))
NPuzzle30.position = CGPoint(x: 700, y: 600)
NPuzzle30.zPosition = 1
}
It works, but the animations are not synchronized with each other, so the video loses its integrity. I searched for a long time for a solution to synchronize the animations; I see two possibilities: the first is to create a single SKNode() with all the jigsaw pieces inside, but I want to be able to move each piece independently and have had no success getting a synchronized animation with this method.
The other way seems to be to create a group with all the animations together, but this doesn't work and causes the application to stop.
Here is all the code I use:
import SpriteKit
import UIKit
import AVFoundation
import AVKit
import CoreFoundation
private let kpuzzleNodeName = "puzzle"
private let kdancing = "dancing"
class SKPuzzle: SKSpriteNode {
var name2:String = "";
}
class GameScene: SKScene {
var background = SKVideoNode(videoFileNamed: "Video_Socle.mov")
var selectedNode = SKPuzzle()
var player:AVPlayer?
var videoNode:SKVideoNode?
var NPuzzle13 = SKPuzzle()
var NPuzzle19 = SKPuzzle()
var NPuzzle30 = SKPuzzle()
var NPuzzle11 = SKPuzzle()
var NPuzzle29 = SKPuzzle()
var NPuzzle35 = SKPuzzle()
var puzzle13 = SKAction()
var puzzle19 = SKAction()
var puzzle30 = SKAction()
var puzzle11 = SKAction()
var puzzle29 = SKAction()
var puzzle35 = SKAction()
let sheet_puzzle13 = Puzzle13()
let sheet_puzzle19 = Puzzle19()
let sheet_puzzle30 = Puzzle30()
let sheet_puzzle11 = Puzzle11()
let sheet_puzzle29 = Puzzle29()
let sheet_puzzle35 = Puzzle35()
override init(size: CGSize) {
super.init(size: size)
// 1
self.background.name = kdancing
self.background.anchorPoint = CGPointZero
background.zPosition = 0
self.addChild(background)
// 2
let sheet = Statiques()
let sprite_dancing1 = SKSpriteNode(texture: sheet.Dancing1())
let sprite_dancing2 = SKSpriteNode(texture: sheet.Dancing2())
sprite_dancing1.name = kdancing
sprite_dancing2.name = kdancing
sprite_dancing1.position = CGPoint(x: 837, y: 752)
sprite_dancing1.zPosition = 2
sprite_dancing2.position = CGPoint(x: 1241, y: 752)
sprite_dancing2.zPosition = 2
background.addChild(sprite_dancing1)
background.addChild(sprite_dancing2)
let imageNames = [sheet.Puzzle13() , sheet.Puzzle19(), sheet.Puzzle30(), sheet.Puzzle11(), sheet.Puzzle29(), sheet.Puzzle35() ]
for i in 0..<imageNames.count {
let imageName = imageNames[i]
let sprite = SKPuzzle(texture: imageName)
sprite.name = kpuzzleNodeName
sprite.name2 = "\(i)"
let offsetFraction = (CGFloat(i) + 1.0)/(CGFloat(imageNames.count) + 1.0)
sprite.position = CGPoint(x: size.width * offsetFraction, y: size.height / 2)
sprite.zPosition = 3
background.addChild(sprite)
}
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func touchesBegan(touches: Set<UITouch>, withEvent event: UIEvent?) {
for touch: AnyObject in touches {
let positionInScene = touch.locationInNode(self)
selectNodeForTouch(positionInScene)
}
}
override func didMoveToView(view: SKView) {
let urlStr = NSBundle.mainBundle().pathForResource("Video_Socle", ofType: "mov")
let url = NSURL(fileURLWithPath: urlStr!)
player = AVPlayer(URL: url)
NSNotificationCenter.defaultCenter().addObserverForName(AVPlayerItemDidPlayToEndTimeNotification, object: player!.currentItem, queue: nil)
{ notification in
let t1 = CMTimeMake(5, 100);
self.player!.seekToTime(t1)
self.player!.play()
}
videoNode = SKVideoNode(AVPlayer: player!)
videoNode!.position = CGPointMake(frame.size.width/2, frame.size.height/2)
videoNode!.size = CGSize(width: 2048, height: 1536)
videoNode!.zPosition = 0
background.addChild(videoNode!)
videoNode!.play()
let gestureRecognizer = UIPanGestureRecognizer(target: self, action: #selector(GameScene.handlePanFrom(_:)))
self.view!.addGestureRecognizer(gestureRecognizer)
}
func handlePanFrom(recognizer : UIPanGestureRecognizer) {
if recognizer.state == .Began {
var touchLocation = recognizer.locationInView(recognizer.view)
touchLocation = self.convertPointFromView(touchLocation)
self.selectNodeForTouch(touchLocation)
} else if recognizer.state == .Changed {
var translation = recognizer.translationInView(recognizer.view!)
translation = CGPoint(x: translation.x, y: -translation.y)
self.panForTranslation(translation)
recognizer.setTranslation(CGPointZero, inView: recognizer.view)
} else if recognizer.state == .Ended {
}
}
func degToRad(degree: Double) -> CGFloat {
return CGFloat(degree / 180.0 * M_PI)
}
func selectNodeForTouch(touchLocation : CGPoint) {
// 1
let touchedNode = self.nodeAtPoint(touchLocation)
if touchedNode is SKPuzzle {
// 2
if !selectedNode.isEqual(touchedNode) {
selectedNode.runAction(SKAction.rotateToAngle(0.0, duration: 0.1))
selectedNode = touchedNode as! SKPuzzle
// 3
if touchedNode.name! == kpuzzleNodeName {
let sequence = SKAction.sequence([SKAction.rotateByAngle(degToRad(-4.0), duration: 0.1),
SKAction.rotateByAngle(0.0, duration: 0.1),
SKAction.rotateByAngle(degToRad(4.0), duration: 0.1)])
selectedNode.runAction(SKAction.repeatActionForever(sequence))
}
}
}
}
func panForTranslation(translation : CGPoint) {
let position = selectedNode.position
if selectedNode.name! == kpuzzleNodeName {
selectedNode.position = CGPoint(x: position.x + translation.x * 2, y: position.y + translation.y * 2)
print (selectedNode.name)
print (selectedNode.name2)
if selectedNode.name2 == "0" {
Anim_Puzzle13(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "1" {
Anim_Puzzle19(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "2" {
Anim_Puzzle30(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "3" {
Anim_Puzzle11(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "4" {
Anim_Puzzle29(selectedNode)
}
print (selectedNode.name2)
if selectedNode.name2 == "5" {
Anim_Puzzle35(selectedNode)
}
}
}
func Anim_Puzzle13 (Node13 : SKPuzzle) {
let puzzle13 = SKAction.animateWithTextures(sheet_puzzle13.Puzzle13_(), timePerFrame: 0.066)
NPuzzle13 = Node13
NPuzzle13.runAction(SKAction.repeatActionForever(puzzle13))
NPuzzle13.position = CGPoint(x: 500, y: 400)
NPuzzle13.zPosition = 1
}
func Anim_Puzzle19 (Node19 : SKPuzzle) {
let puzzle19 = SKAction.animateWithTextures(sheet_puzzle19.Puzzle19_(), timePerFrame: 0.066)
NPuzzle19 = Node19
NPuzzle19.runAction(SKAction.repeatActionForever(puzzle19))
NPuzzle19.position = CGPoint(x: 600, y: 500)
NPuzzle19.zPosition = 1
}
func Anim_Puzzle30 (Node30 : SKPuzzle) {
let puzzle30 = SKAction.animateWithTextures(sheet_puzzle30.Puzzle30_(), timePerFrame: 0.066)
NPuzzle30 = Node30
NPuzzle30.runAction(SKAction.repeatActionForever(puzzle30))
NPuzzle30.position = CGPoint(x: 700, y: 600)
NPuzzle30.zPosition = 1
}
func Anim_Puzzle11 (Node11 : SKPuzzle) {
let puzzle11 = SKAction.animateWithTextures(sheet_puzzle11.Puzzle11_(), timePerFrame: 0.066)
NPuzzle11 = Node11
NPuzzle11.runAction(SKAction.repeatActionForever(puzzle11))
NPuzzle11.position = CGPoint(x: 800, y: 700)
NPuzzle11.zPosition = 1
}
func Anim_Puzzle29 (Node29 : SKPuzzle) {
let puzzle29 = SKAction.animateWithTextures(sheet_puzzle29.Puzzle29_(), timePerFrame: 0.066)
NPuzzle29 = Node29
NPuzzle29.runAction(SKAction.repeatActionForever(puzzle29))
NPuzzle29.position = CGPoint(x: 900, y: 800)
NPuzzle29.zPosition = 1
}
func Anim_Puzzle35 (Node35 : SKPuzzle) {
let puzzle35 = SKAction.animateWithTextures(sheet_puzzle35.Puzzle35_(), timePerFrame: 0.066)
NPuzzle35 = Node35
NPuzzle35.runAction(SKAction.repeatActionForever(puzzle35))
NPuzzle35.position = CGPoint(x: 1000, y: 900)
NPuzzle35.zPosition = 1
}
}
I'm not sure whether it's possible to synchronize animations like this, with a separate SKAction on each of several separate parts, because each part still needs to be selectable individually.
UPDATE: I've tried the action-group approach, but I get the same animation playing on each sprite instead of a different animation synchronized on each sprite (6 different animations synchronized across 6 different sprites):
let sheet13 = Puzzle13()
let sheet19 = Puzzle19()
let sheet30 = Puzzle30()
let sheet11 = Puzzle11()
let sheet29 = Puzzle29()
let sheet35 = Puzzle35()
let imageAnims = [sheet13.Puzzle13_0000() , sheet19.Puzzle19_0000(), sheet30.Puzzle30_0000(), sheet11.Puzzle11_0000(), sheet29.Puzzle29_0000(), sheet35.Puzzle35_0000() ]
let puzzle13 = SKAction.animateWithTextures(sheet13.Puzzle13_(), timePerFrame: 0.066)
let puzzle19 = SKAction.animateWithTextures(sheet19.Puzzle19_(), timePerFrame: 0.066)
let puzzle30 = SKAction.animateWithTextures(sheet30.Puzzle30_(), timePerFrame: 0.066)
let puzzle11 = SKAction.animateWithTextures(sheet11.Puzzle11_(), timePerFrame: 0.066)
let puzzle29 = SKAction.animateWithTextures(sheet29.Puzzle29_(), timePerFrame: 0.066)
let puzzle35 = SKAction.animateWithTextures(sheet35.Puzzle35_(), timePerFrame: 0.066)
let group = SKAction.group([puzzle13,puzzle19,puzzle30,puzzle11,puzzle29,puzzle35])
for i in 0..<imageAnims.count {
let imageAnim = imageAnims[i]
let spriteAnim = SKPuzzle(texture: imageAnim)
spriteAnim.name = kanimNodeName
spriteAnim.name2 = "\(i)"
let offsetFraction = (CGFloat(i) + 1.0)/(CGFloat(imageAnims.count) + 1.0)
spriteAnim.position = CGPoint(x: ((size.width)*2) * offsetFraction, y: size.height * 1.5)
spriteAnim.zPosition = 3
spriteAnim.runAction(SKAction.repeatActionForever(group))
background.addChild(spriteAnim)
}
First of all, I want to list two different methods to create your SKAction:
Starting with parallel actions by using SKAction.group:
let sprite = SKSpriteNode(imageNamed:"Spaceship")
let scale = SKAction.scaleTo(0.1, duration: 0.5)
let fade = SKAction.fadeOutWithDuration(0.5)
let group = SKAction.group([scale, fade])
sprite.runAction(group)
Another useful method is the completion handler, so you can know when an SKAction has finished:
extension SKNode
{
func runAction( action: SKAction!, withKey: String!, optionalCompletion: dispatch_block_t? )
{
if let completion = optionalCompletion
{
let completionAction = SKAction.runBlock( completion )
let compositeAction = SKAction.sequence([ action, completionAction ])
runAction( compositeAction, withKey: withKey )
}
else
{
runAction( action, withKey: withKey )
}
}
}
Usage:
node.runAction(move,withKey:"swipeMove",optionalCompletion: {
// here the action is finished, do whatever you want
})
After that, regarding your project, I've seen many node.runAction calls; you can also adopt this strategy to synchronize your actions:
var myAction30 :SKAction!
var myAction31 :SKAction!
self.myAction30 = SKAction.repeatActionForever(puzzle30)
self.myAction31 = SKAction.repeatActionForever(puzzle31)
let group = SKAction.group([self.myAction30, self.myAction31])
self.runAction(group)
UPDATE: I've seen your updated part; when you speak about "synchronize", you probably don't mean running the actions in parallel.
So, if you want to run one action after another, there is also:
self.myAction30 = SKAction.repeatActionForever(puzzle30)
self.myAction31 = SKAction.repeatActionForever(puzzle31)
let sequence = SKAction.sequence([self.myAction30, self.myAction31])
self.runAction(sequence)
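If the goal is a different animation on each sprite, all starting at the same moment, one sketch (Swift 2 syntax, reusing the sheet and node names from the question) is to wrap each per-node runAction in a runBlock and group the blocks on the scene; this only aligns their start times and does not correct for drift over a long run:
let anim13 = SKAction.repeatActionForever(SKAction.animateWithTextures(sheet_puzzle13.Puzzle13_(), timePerFrame: 0.066))
let anim19 = SKAction.repeatActionForever(SKAction.animateWithTextures(sheet_puzzle19.Puzzle19_(), timePerFrame: 0.066))
let startAll = SKAction.group([
    SKAction.runBlock { self.NPuzzle13.runAction(anim13) },
    SKAction.runBlock { self.NPuzzle19.runAction(anim19) }
])
// Running the group on the scene kicks off both nodes' animations in the same frame.
self.runAction(startAll)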

Swift 2: AVAssetReader and NSInputStream Audio Graph

I'm trying to convert an example from Bob McCune's Learning AVFoundation book and having some issues using AVAssetReader and NSInputStream. The graph should be a pure sine wave but the values seem reflected on the X-axis somehow.
I've tried every iteration of byte swapping I could think of and that didn't work.
Playground posted to github here:
https://github.com/justinlevi/AVAssetReader
//: Playground - noun: a place where people can play
import UIKit
import AVFoundation
import XCPlayground
func plotArrayInPlayground<T>(arrayToPlot:Array<T>, title:String) {
for currentValue in arrayToPlot {
XCPCaptureValue(title, value: currentValue)
}
}
class SSSampleDataFilter {
var sampleData:NSData?
init(data:NSData) {
sampleData = data
}
func filteredSamplesForSize(size:CGSize) -> [Int]{
var filterSamples = [UInt16]()
if let sampleData = sampleData {
let sampleCount = sampleData.length
let binSize = CGFloat(sampleCount) / size.width
let stream = NSInputStream(data: sampleData)
stream.open()
var readBuffer = Array<UInt8>(count: 16 * 1024, repeatedValue: 0)
var totalBytesRead = 0
let size = sizeof(UInt16)
while (totalBytesRead < sampleData.length) {
let numberOfBytesRead = stream.read(&readBuffer, maxLength: size)
let u16: UInt16 = UnsafePointer<UInt16>(readBuffer).memory
var sampleBin = [UInt16]()
for _ in 0..<Int(binSize) {
sampleBin.append(u16)
}
filterSamples.append(sampleBin.maxElement()!)
totalBytesRead += numberOfBytesRead
}
//plotArrayInPlayground(filterSamples, title: "Samples")
}
return [0]
}
}
let sineURL = NSBundle.mainBundle().URLForResource("440.0-sine", withExtension: "aif")!
let asset = AVAsset(URL: sineURL)
var assetReader:AVAssetReader
do{
assetReader = try AVAssetReader(asset: asset)
}catch{
fatalError("Unable to read Asset: \(error) : \(__FUNCTION__).")
}
let track = asset.tracksWithMediaType(AVMediaTypeAudio).first
let outputSettings: [String:Int] =
[ AVFormatIDKey: Int(kAudioFormatLinearPCM),
AVLinearPCMIsBigEndianKey: 0,
AVLinearPCMIsFloatKey: 0,
AVLinearPCMBitDepthKey: 16,
AVLinearPCMIsNonInterleaved: 0]
let trackOutput = AVAssetReaderTrackOutput(track: track!, outputSettings: outputSettings)
assetReader.addOutput(trackOutput)
assetReader.startReading()
var sampleData = NSMutableData()
while assetReader.status == AVAssetReaderStatus.Reading {
if let sampleBufferRef = trackOutput.copyNextSampleBuffer() {
if let blockBufferRef = CMSampleBufferGetDataBuffer(sampleBufferRef) {
let bufferLength = CMBlockBufferGetDataLength(blockBufferRef)
var data = NSMutableData(length: bufferLength)
CMBlockBufferCopyDataBytes(blockBufferRef, 0, bufferLength, data!.mutableBytes)
var samples = UnsafeMutablePointer<Int16>(data!.mutableBytes)
sampleData.appendBytes(samples, length: bufferLength)
CMSampleBufferInvalidate(sampleBufferRef)
}
}
}
let view = UIView(frame: CGRectMake(0, 0, 375.0, 667.0))
//view.backgroundColor = UIColor.lightGrayColor()
if assetReader.status == AVAssetReaderStatus.Completed {
print("complete")
let filter = SSSampleDataFilter(data: sampleData)
let filteredSamples = filter.filteredSamplesForSize(view.bounds.size)
}
//XCPShowView("Bezier Path", view: view)
XCPSetExecutionShouldContinueIndefinitely(true)
Here's what the graph should look like (taken from Audacity)
Here's what the graph looks like in the playground
Unfortunately your playground doesn't render anything for me in Xcode 7 beta 5; however, you're asking the AVAssetReaderTrackOutput to give you signed 16-bit ints, yet your code treats them as unsigned UInt16s (and your Audacity file uses floats).
Changing all instances of UInt16 to Int16 in your playground seems to print sensible-looking sinusoidal data.
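A minimal sketch of that change applied to the reading loop in the playground (Swift 2 syntax, same variable names, samples treated as signed 16-bit integers):
var filterSamples = [Int16]()
// ... inside the while loop of filteredSamplesForSize:
let numberOfBytesRead = stream.read(&readBuffer, maxLength: size)
let sample: Int16 = UnsafePointer<Int16>(readBuffer).memory
var sampleBin = [Int16]()
for _ in 0..<Int(binSize) {
    sampleBin.append(sample)
}
filterSamples.append(sampleBin.maxElement()!)
totalBytesRead += numberOfBytesRead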
