Off-Screen Rendering - iOS

For off-screen rendering in Metal, I create the texture like this:
let textureDescriptors = MTLTextureDescriptor()
textureDescriptors.textureType = MTLTextureType.type2D
let screenRatio = UIScreen.main.scale
textureDescriptors.width = Int((DrawingManager.shared.size?.width)!) * Int(screenRatio)
textureDescriptors.height = Int((DrawingManager.shared.size?.height)!) * Int(screenRatio)
textureDescriptors.pixelFormat = .bgra8Unorm
textureDescriptors.storageMode = .shared
textureDescriptors.usage = [.renderTarget, .shaderRead]
ssTexture = device.makeTexture(descriptor: textureDescriptors)
ssTexture.label = "ssTexture"
Here the texture is in clear color. Is it possible to load an image texture, and is it possible to render the image texture in the draw method? My render pass is set up like this:
let renderPass = MTLRenderPassDescriptor()
renderPass.colorAttachments[0].loadAction = .clear
renderPass.colorAttachments[0].clearColor = MTLClearColorMake( 0.0, 0.0, 0.0, 0.0)
renderPass.colorAttachments[0].texture = ssTexture
renderPass.colorAttachments[0].storeAction = .store

I'm not sure what you're asking.
There's MTKTextureLoader (in MetalKit) for creating textures initialized with the contents of an image.
You can use the replace(region:...) methods of MTLTexture to fill all or part of a texture with image data.
You can use MTLBlitCommandEncoder to copy data from one texture to (all or part of) another or from a buffer to a texture.
You can draw to a texture or write to it from a compute shader.
It's a general-purpose API. There are many ways to do the things you seem to be asking. What have you tried? In what way did those attempts fail to achieve what you want?
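For example, here is a minimal sketch combining two of the approaches above: loading an image into a texture with MTKTextureLoader and then blitting it into ssTexture before the first draw. The asset name "brush" is hypothetical, and device, commandQueue, screenRatio and ssTexture are assumed to exist as in your code.
import MetalKit

let loader = MTKTextureLoader(device: device)
let options: [MTKTextureLoader.Option: Any] = [
    .SRGB: false,
    .textureStorageMode: NSNumber(value: MTLStorageMode.shared.rawValue)
]
// "brush" is a placeholder asset-catalog name.
let imageTexture = try! loader.newTexture(name: "brush", scaleFactor: screenRatio, bundle: nil, options: options)

// Copy the image into ssTexture once, before the first draw (sizes assumed to match).
let commandBuffer = commandQueue.makeCommandBuffer()!
let blit = commandBuffer.makeBlitCommandEncoder()!
blit.copy(from: imageTexture, sourceSlice: 0, sourceLevel: 0,
          sourceOrigin: MTLOrigin(x: 0, y: 0, z: 0),
          sourceSize: MTLSize(width: imageTexture.width, height: imageTexture.height, depth: 1),
          to: ssTexture, destinationSlice: 0, destinationLevel: 0,
          destinationOrigin: MTLOrigin(x: 0, y: 0, z: 0))
blit.endEncoding()
commandBuffer.commit()
After this copy, using loadAction .load in the render pass preserves the image contents between frames instead of clearing them.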

Related

3D viewer for iOS using MetalKit and Swift - Depth doesn’t work

I’m using Metal with Swift to build a 3D viewer for iOS and I have some issues getting depth to work. For now, I can draw and render a single shape correctly in 3D (like a simple square plane (4 triangles, 2 for each face) or a tetrahedron (4 triangles)).
However, when I try to draw 2 shapes together, the depth between these two shapes doesn’t work. For example, a plane is placed at Z = 0, behind a tetra which is placed at Z > 0. If I look at this scene from the back (camera placed somewhere at Z < 0), it’s OK. But when I look at this scene from the front (camera placed somewhere at Z > 0), it doesn’t work: the plane is drawn over the tetra even though it is placed behind it.
I think that the plane is always drawn on the screen before the tetra (no matter the position of the camera) because the drawPrimitives call for the plane is made before the call for the tetra. However, I was thinking that the depth and stencil settings would deal with that properly.
I don’t know if the depth isn’t working because the depth texture, stencil state and so on are not correctly set, or because each shape is drawn in a different drawPrimitives call.
In other words, do I have to draw all shapes in the same drawPrimitives call to make depth work? The idea of these multiple drawPrimitives calls is to handle a different primitive type for each shape (triangle, line, …).
This is how I set up the depth stencil state, the depth texture and the render pipeline:
init() {
    // some miscellaneous initialisation …
    // …
    // all MTL stuff :
    commandQueue = device.makeCommandQueue()

    // Stencil descriptor
    let depthStencilDescriptor = MTLDepthStencilDescriptor()
    depthStencilDescriptor.depthCompareFunction = .less
    depthStencilDescriptor.isDepthWriteEnabled = true
    depthStencilState = device.makeDepthStencilState(descriptor: depthStencilDescriptor)!

    // Library and pipeline descriptor & state
    let library = try! device.makeLibrary(source: shaders, options: nil)
    // Our vertex function name
    let vertexFunction = library.makeFunction(name: "basic_vertex_function")
    // Our fragment function name
    let fragmentFunction = library.makeFunction(name: "basic_fragment_function")
    // Create basic descriptor
    let renderPipelineDescriptor = MTLRenderPipelineDescriptor()
    // Attach the pixel format that is the same as the MetalView
    renderPipelineDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
    renderPipelineDescriptor.depthAttachmentPixelFormat = .depth32Float_stencil8
    renderPipelineDescriptor.stencilAttachmentPixelFormat = .depth32Float_stencil8
    //renderPipelineDescriptor.stencilAttachmentPixelFormat = .stencil8
    // Attach the shader functions
    renderPipelineDescriptor.vertexFunction = vertexFunction
    renderPipelineDescriptor.fragmentFunction = fragmentFunction
    // Try to update the state of the renderPipeline
    do {
        renderPipelineState = try device.makeRenderPipelineState(descriptor: renderPipelineDescriptor)
    } catch {
        print(error.localizedDescription)
    }

    // Depth Texture
    let desc = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .stencil8, width: 576, height: 723, mipmapped: false)
    desc.storageMode = .private
    desc.usage = .pixelFormatView
    depthTexture = device.makeTexture(descriptor: desc)!

    // Uniforms buffer
    modelMatrix = Matrix4()
    modelMatrix.multiplyLeft(worldMatrix)
    uniformBuffer = device.makeBuffer(length: MemoryLayout<Float>.stride * 16 * 2, options: [])
    let bufferPointer = uniformBuffer.contents()
    memcpy(bufferPointer, &modelMatrix.matrix.m, MemoryLayout<Float>.stride * 16)
    memcpy(bufferPointer + MemoryLayout<Float>.stride * 16, &projectionMatrix.matrix.m, MemoryLayout<Float>.stride * 16)
}
And the draw function:
func draw(in view: MTKView) {
    // create render pass descriptor
    guard let drawable = view.currentDrawable,
          let renderPassDescriptor = view.currentRenderPassDescriptor else {
        return
    }
    renderPassDescriptor.depthAttachment.texture = depthTexture
    renderPassDescriptor.depthAttachment.clearDepth = 1.0
    //renderPassDescriptor.depthAttachment.loadAction = .load
    renderPassDescriptor.depthAttachment.loadAction = .clear
    renderPassDescriptor.depthAttachment.storeAction = .store

    // Create a buffer from the commandQueue
    let commandBuffer = commandQueue.makeCommandBuffer()
    let commandEncoder = commandBuffer?.makeRenderCommandEncoder(descriptor: renderPassDescriptor)
    commandEncoder?.setRenderPipelineState(renderPipelineState)
    commandEncoder?.setFrontFacing(.counterClockwise)
    commandEncoder?.setCullMode(.back)
    commandEncoder?.setDepthStencilState(depthStencilState)

    // Draw all obj in objects
    // objects = array of Object; each object describing vertices and primitive type of a shape
    // objects[0] = Plane, objects[1] = Tetra
    for obj in objects {
        createVertexBuffers(device: view.device!, vertices: obj.vertices)
        commandEncoder?.setVertexBuffer(vertexBuffer, offset: 0, index: 0)
        commandEncoder?.setVertexBuffer(uniformBuffer, offset: 0, index: 1)
        commandEncoder?.drawPrimitives(type: obj.primitive, vertexStart: 0, vertexCount: obj.vertices.count)
    }
    commandEncoder?.endEncoding()
    commandBuffer?.present(drawable)
    commandBuffer?.commit()
}
Does anyone have an idea of what is wrong or missing?
Any advice is welcome !
Edited 09/23/2022: Code updated
A few things off the top of my head:
First
let desc = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .depth32Float_stencil8, width: 576, height: 723, mipmapped: false)
Second
renderPipelineDescriptor.depthAttachmentPixelFormat = .depth32Float_stencil8
Notice the pixelFormat should be the same in both places, and since you seem to be using the stencil test as well, depth32Float_stencil8 will be perfect.
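Putting the first two points together, a sketch of the corrected setup might look like this (sizes taken from the question; note, as an aside, that a texture used as a depth attachment also needs render-target usage rather than .pixelFormatView):
let depthDesc = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .depth32Float_stencil8,
                                                         width: 576, height: 723,
                                                         mipmapped: false)
depthDesc.storageMode = .private
depthDesc.usage = .renderTarget        // required for a depth attachment
depthTexture = device.makeTexture(descriptor: depthDesc)!

renderPipelineDescriptor.depthAttachmentPixelFormat = .depth32Float_stencil8
renderPipelineDescriptor.stencilAttachmentPixelFormat = .depth32Float_stencil8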
Third
Now, another thing you seem to be missing is clearing the depth texture before every render pass, am I right?
So you should set the load action of the depth attachment to .clear, like this:
renderPassDescriptor.depthAttachment.loadAction = .clear
Fourth (subjective to your use case)
If none of the above works, you might need to discard fragments with alpha = 0 in your fragment function by calling discard_fragment() when the color you are returning has alpha 0.
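A minimal sketch of that idea, written as shader source the way the question already compiles its library from a Swift string (the VertexOut layout here is hypothetical; adapt it to whatever your fragment function actually receives):
let fragmentSource = """
#include <metal_stdlib>
using namespace metal;

struct VertexOut {
    float4 position [[position]];
    float4 color;
};

fragment float4 basic_fragment_function(VertexOut in [[stage_in]]) {
    if (in.color.a == 0.0) {
        discard_fragment();   // drop fully transparent fragments entirely
    }
    return in.color;
}
"""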
Also note for future:
Ideally you want the depth texture to be fresh and empty when every new frame starts getting rendered (the first draw call of a render pass), and then reuse it for subsequent draw calls in the same render pass by setting the load action to .load and the store action to .store.
Example: assuming you have 3 draw calls, say drawing three polygons (a triangle, a rectangle and a sphere) in one frame, then your depth attachment setup should be like this:
Frame 1 starts:
First draw (triangle): loadAction: .clear, storeAction: .store
Second draw (rectangle): loadAction: .load, storeAction: .store
Third draw (sphere): loadAction: .load, storeAction: .store / .dontCare
Frame 2 starts (notice you clear the depth buffer for the first draw call of the new frame):
First draw (triangle): loadAction: .clear, storeAction: .store
Second draw (rectangle): loadAction: .load, storeAction: .store
Third draw (sphere): loadAction: .load, storeAction: .store / .dontCare
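A minimal sketch of that pattern across two passes in one frame (assuming commandQueue exists, and that depthTexture and a colorTexture render target were created once with .renderTarget usage; colorTexture is a hypothetical name):
let commandBuffer = commandQueue.makeCommandBuffer()!

// First pass of the frame: start from a clean depth buffer.
let firstPass = MTLRenderPassDescriptor()
firstPass.colorAttachments[0].texture = colorTexture
firstPass.colorAttachments[0].loadAction = .clear
firstPass.colorAttachments[0].storeAction = .store
firstPass.depthAttachment.texture = depthTexture
firstPass.depthAttachment.clearDepth = 1.0
firstPass.depthAttachment.loadAction = .clear
firstPass.depthAttachment.storeAction = .store
let firstEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: firstPass)!
// ... encode the first batch of draw calls here ...
firstEncoder.endEncoding()

// Later pass in the same frame: keep the depth that was already written.
let secondPass = MTLRenderPassDescriptor()
secondPass.colorAttachments[0].texture = colorTexture
secondPass.colorAttachments[0].loadAction = .load
secondPass.colorAttachments[0].storeAction = .store
secondPass.depthAttachment.texture = depthTexture
secondPass.depthAttachment.loadAction = .load
secondPass.depthAttachment.storeAction = .dontCare   // the last pass can discard depth
let secondEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: secondPass)!
// ... encode the remaining draw calls here ...
secondEncoder.endEncoding()

commandBuffer.commit()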
Your depth texture pixel format is not correct; try changing it to MTLPixelFormatDepth32Float or MTLPixelFormatDepth32Float_Stencil8.

How to load an existing texture using MTLRenderPassDescriptor

I have an image in the asset catalog, and I am converting it to an MTLTexture. I want to pass this texture to the shader function, append to it, and add a smudge feature to the passed texture using shader functions.
Currently I am passing the texture in an MTLRenderPassDescriptor like below:
let renderPassWC = MTLRenderPassDescriptor()
renderPassWC.colorAttachments[0].texture = ssTexture
renderPassWC.colorAttachments[0].loadAction = .load
renderPassWC.colorAttachments[0].storeAction = .store
When I edit the texture in the shader function, for example moving a pixel of ssTexture to an adjacent pixel, the movement doesn't stop, because the operations I am doing in the shader functions keep operating on the appended texture every draw cycle.
So I think that rather than a loadAction of .load, .clear would be an option, but the texture that was passed becomes clear when I change the code as below:
renderPassWC.colorAttachments[0].texture = ssTexture
renderPassWC.colorAttachments[0].loadAction = .clear
renderPassWC.colorAttachments[0].clearColor = MTLClearColorMake( 0.0, 0.0, 0.0, 0.0)
Is there any possible way to pass the image texture with clear?

Renderpass Descriptor in Metal

I am new to Metal. How many MTLRenderPassDescriptors can we create in a single application? Is there any limit?
I want to have three MTLRenderPassDescriptors; is that possible? With one, I need to draw quads according to the finger position. It is like offscreen rendering; there will be a texture output.
I want to pass the output texture to another render pass descriptor to process its color, which is also similar to offscreen rendering. Here a new output texture will be available.
I want to present the new output texture to the screen. Is it possible? When I create three render pass descriptors, only two are available.
First offscreen rendering; it works fine:
let renderPass = MTLRenderPassDescriptor()
renderPass.colorAttachments[0].texture = ssTexture
renderPass.colorAttachments[0].loadAction = .clear
renderPass.colorAttachments[0].clearColor = MTLClearColorMake( 0.0, 0.0, 0.0, 0.0)
renderPass.colorAttachments[0].storeAction = .store
let commandBuffer = commandQueue.makeCommandBuffer()
var commandEncoder = commandBuffer?.makeRenderCommandEncoder(descriptor: renderPass)
for scene in scenesTool{
scene.render(commandEncoder: commandEncoder!)
}
commandEncoder?.endEncoding()
This render pass descriptor is not available in the debugger either:
let renderPassWC = MTLRenderPassDescriptor()
renderPassWC.colorAttachments[0].texture = ssCurrentTexture
renderPassWC.colorAttachments[0].loadAction = .clear
renderPassWC.colorAttachments[0].clearColor = MTLClearColorMake( 0.0, 0.0, 0.0, 0.0)
renderPassWC.colorAttachments[0].storeAction = .store
let commandBufferWC = commandQueue.makeCommandBuffer()
let commandEncoderWC = commandBufferWC?.makeRenderCommandEncoder(descriptor: renderPassWC)
for scene in scenesWCTool{
print(" msbksadbdksabfkasbd")
scene.render(commandEncoder: commandEncoderWC!)
}
commandEncoderWC?.endEncoding()
The third render pass descriptor is working fine:
let descriptor = view.currentRenderPassDescriptor
commandEncoder = commandBuffer?.makeRenderCommandEncoder(descriptor: descriptor!)
for x in canvasTemporaryScenes{
x.updateCanvas(texture: ssTexture!)
x.render(commandEncoder: commandEncoder!)
}
commandEncoder?.endEncoding()
guard let drawable = view.currentDrawable else { return }
commandBuffer?.present(drawable)
commandBuffer?.commit()
commandBuffer?.waitUntilCompleted()
MTLRenderPassDescriptor objects are cheap to create, unlike some other kinds of objects in Metal (e.g. pipeline state objects). You are expected to create descriptors any time you need them. You don't need to keep them for a long period of time. Just recreate them each time you are going to create a render command encoder. For example, creating multiple render pass descriptors per frame is perfectly normal. There's no specific limit on the number you can create.
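For example, a per-frame sketch of the three passes you describe, recreating each descriptor on the fly (ssTexture, ssCurrentTexture, commandQueue and the MTKView are assumed to exist; note that everything is encoded into a single command buffer, which is committed exactly once):
guard let drawable = view.currentDrawable,
      let screenPass = view.currentRenderPassDescriptor,
      let commandBuffer = commandQueue.makeCommandBuffer() else { return }

// Pass 1: draw the finger quads into ssTexture.
let quadPass = MTLRenderPassDescriptor()
quadPass.colorAttachments[0].texture = ssTexture
quadPass.colorAttachments[0].loadAction = .clear
quadPass.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 0.0)
quadPass.colorAttachments[0].storeAction = .store
let quadEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: quadPass)!
// ... encode the quad draw calls here ...
quadEncoder.endEncoding()

// Pass 2: process the color of ssTexture into ssCurrentTexture.
let colorPass = MTLRenderPassDescriptor()
colorPass.colorAttachments[0].texture = ssCurrentTexture
colorPass.colorAttachments[0].loadAction = .clear
colorPass.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 0.0)
colorPass.colorAttachments[0].storeAction = .store
let colorEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: colorPass)!
// ... encode draw calls that sample ssTexture here ...
colorEncoder.endEncoding()

// Pass 3: draw ssCurrentTexture to the screen.
let screenEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: screenPass)!
// ... encode draw calls that sample ssCurrentTexture here ...
screenEncoder.endEncoding()

commandBuffer.present(drawable)
commandBuffer.commit()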
I have no idea what you mean by "When I create three renderpass descriptors only two is available" and "This renderpass descriptor is not available in Debug". What do you mean by "available" in these sentences? Explain exactly what results you want and expect and what's actually happening that's different than that.

Metal API validation crash

I wrote the following code to implement additive blending for offscreen rendering to a Metal texture but it crashes if Metal API Validation is enabled:
validateRenderPassDescriptor:487: failed assertion `Texture at colorAttachment[0] has usage (0x02) which doesn't specify MTLTextureUsageRenderTarget (0x04)'
Here is the code:
let renderPipelineDescriptorGreen = MTLRenderPipelineDescriptor()
renderPipelineDescriptorGreen.vertexFunction = vertexFunctionGreen
renderPipelineDescriptorGreen.fragmentFunction = fragmentFunctionAccumulator
renderPipelineDescriptorGreen.colorAttachments[0].pixelFormat = .bgra8Unorm
renderPipelineDescriptorGreen.colorAttachments[0].isBlendingEnabled = true
renderPipelineDescriptorGreen.colorAttachments[0].rgbBlendOperation = .add
renderPipelineDescriptorGreen.colorAttachments[0].sourceRGBBlendFactor = .one
renderPipelineDescriptorGreen.colorAttachments[0].destinationRGBBlendFactor = .one
All I want to implement is additive color blending, something like this in OpenGLES:
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
glEnable(GL_BLEND);
What is wrong in my code?
OK, I found the solution. The solution was to set the texture usage flag MTLTextureUsage.renderTarget in addition to (or in place of, depending upon usage) shaderRead or shaderWrite when creating the texture:
let textureDescriptor = MTLTextureDescriptor()
textureDescriptor.textureType = .type3D
textureDescriptor.pixelFormat = .bgra8Unorm
textureDescriptor.width = 256
textureDescriptor.height = 1
textureDescriptor.usage = .renderTarget
let texture = device.makeTexture(descriptor: textureDescriptor)
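If the offscreen texture is also sampled by a later pass (the usual offscreen-rendering case), the usage can combine both flags; a variant of the descriptor above:
textureDescriptor.usage = [.renderTarget, .shaderRead]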

How do you play a video with alpha channel using AVFoundation?

I have an AR application which uses SceneKit and imports a video onto the scene using AVPlayer, adding it as a child node via an SKVideoNode.
The video is visible as it is supposed to be, but the transparency in the video is not achieved.
Code as follows:
let spriteKitScene = SKScene(size: CGSize(width: self.sceneView.frame.width, height: self.sceneView.frame.height))
spriteKitScene.scaleMode = .aspectFit
guard let fileURL = Bundle.main.url(forResource: "Triple_Tap_1", withExtension: "mp4") else {
return
}
let videoPlayer = AVPlayer(url: fileURL)
videoPlayer.actionAtItemEnd = .none
let videoSpriteKitNode = SKVideoNode(avPlayer: videoPlayer)
videoSpriteKitNode.position = CGPoint(x: spriteKitScene.size.width / 2.0, y: spriteKitScene.size.height / 2.0)
videoSpriteKitNode.size = spriteKitScene.size
videoSpriteKitNode.yScale = -1.0
videoSpriteKitNode.play()
spriteKitScene.backgroundColor = .clear
spriteKitScene.addChild(videoSpriteKitNode)
let background = SCNPlane(width: CGFloat(2), height: CGFloat(2))
background.firstMaterial?.diffuse.contents = spriteKitScene
let backgroundNode = SCNNode(geometry: background)
backgroundNode.position = position
backgroundNode.constraints = [SCNBillboardConstraint()]
backgroundNode.rotation.z = 0
self.sceneView.scene.rootNode.addChildNode(backgroundNode)
// Create a transform with a translation of 0.2 meters in front of the camera.
var translation = matrix_identity_float4x4
translation.columns.3.z = -0.2
let transform = simd_mul((self.session.currentFrame?.camera.transform)!, translation)
// Add a new anchor to the session.
let anchor = ARAnchor(transform: transform)
self.sceneView.session.add(anchor: anchor)
What could be the best way to implement the transparency of the Triple_Tap_1 video in this case?
I have gone through some Stack Overflow questions on this topic, and found the only solution to be a KittyBoom repository that was created somewhere in 2013, using Objective-C.
I'm hoping that the community can reveal a better solution for this problem. The GPUImage library is not something I could get to work.
I've come up with two ways of making this possible. Both utilize surface shader modifiers. Detailed information on shader modifiers can be found in the Apple Developer Documentation.
Here's an example project I've created.
1. Masking
You would need to create another video that represents a transparency mask. In that video black = fully opaque, white = fully transparent (or any other way you would like to represent transparency; you would just need to tweak the surface shader).
Create an SKScene with this video just like you do in the code you provided and put it into material.transparent.contents (the same material that you put the diffuse video contents into):
let spriteKitOpaqueScene = SKScene(...)
let spriteKitMaskScene = SKScene(...)
... // creating SKVideoNodes and AVPlayers for each video etc
let material = SCNMaterial()
material.diffuse.contents = spriteKitOpaqueScene
material.transparent.contents = spriteKitMaskScene
let background = SCNPlane(...)
background.materials = [material]
Add a surface shader modifier to the material. It is going to "convert" black color from the mask video (well, actually red color, since we only need one color component) into alpha.
let surfaceShader = "_surface.transparent.a = 1 - _surface.transparent.r;"
material.shaderModifiers = [ .surface: surfaceShader ]
That's it! Now the white color on the masking video is going to be transparent on the plane.
However, you would have to take extra care to synchronize these two videos, since the AVPlayers will probably get out of sync. Sadly I didn't have time to address that in my example project (yet; I will get back to it when I have time). Look into this question for a possible solution.
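As a rough sketch of one possible approach (the names colorPlayer and maskPlayer are hypothetical, and both player items are assumed to be ready to play): start both players at the same host time instead of calling play() on each one separately.
import AVFoundation

colorPlayer.automaticallyWaitsToMinimizeStalling = false
maskPlayer.automaticallyWaitsToMinimizeStalling = false

// Pick a host time slightly in the future so both players have time to prepare.
let startHostTime = CMTimeAdd(CMClockGetTime(CMClockGetHostTimeClock()),
                              CMTimeMake(value: 1, timescale: 2))   // ~0.5 s from now
colorPlayer.setRate(1.0, time: .zero, atHostTime: startHostTime)
maskPlayer.setRate(1.0, time: .zero, atHostTime: startHostTime)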
Pros:
No artifacts (if synchronized)
Precise
Cons:
Requires two videos instead of one
Requires synchronisation of the AVPlayers
2. Chroma keying
You would need a video that has a vibrant color as a background that would represent parts that should be transparent. Usually green or magenta are used.
Create an SKScene for this video like you normally would and put it into material.diffuse.contents.
Add a chroma key surface shader modifier which will cut out the color of your choice and make those areas transparent. I've borrowed this shader from GPUImage and I don't really know how it actually works, but it seems to be explained in this answer.
let surfaceShader =
"""
uniform vec3 c_colorToReplace = vec3(0, 1, 0);
uniform float c_thresholdSensitivity = 0.05;
uniform float c_smoothing = 0.0;
#pragma transparent
#pragma body
vec3 textureColor = _surface.diffuse.rgb;
float maskY = 0.2989 * c_colorToReplace.r + 0.5866 * c_colorToReplace.g + 0.1145 * c_colorToReplace.b;
float maskCr = 0.7132 * (c_colorToReplace.r - maskY);
float maskCb = 0.5647 * (c_colorToReplace.b - maskY);
float Y = 0.2989 * textureColor.r + 0.5866 * textureColor.g + 0.1145 * textureColor.b;
float Cr = 0.7132 * (textureColor.r - Y);
float Cb = 0.5647 * (textureColor.b - Y);
float blendValue = smoothstep(c_thresholdSensitivity, c_thresholdSensitivity + c_smoothing, distance(vec2(Cr, Cb), vec2(maskCr, maskCb)));
float a = blendValue;
_surface.transparent.a = a;
"""
shaderModifiers = [ .surface: surfaceShader ]
To set the uniforms, use the setValue(_:forKey:) method:
let vector = SCNVector3(x: 0, y: 1, z: 0) // represents float RGB components
setValue(vector, forKey: "c_colorToReplace")
setValue(0.3 as Float, forKey: "c_smoothing")
setValue(0.1 as Float, forKey: "c_thresholdSensitivity")
The as Float part is important; otherwise Swift is going to treat the value as a Double and the shader will not be able to use it.
But to get a precise masking from this you would have to really tinker with the c_smoothing and c_thresholdSensitivity uniforms. In my example project I ended up having a little green rim around the shape, but maybe I just didn't use the right values.
Pros:
only one video required
simple setup
Cons:
possible artifacts (green rim around the border)

Resources