Indexed drawing with Metal - iOS

I am trying to load a model (from a .OBJ file) and draw it to the screen on iOS with MetalKit. The problem is that instead of my model, I get some random polygons...
Here is the code that loads the model (the code is based on a tutorial from raywenderlich.com):
let allocator = MTKMeshBufferAllocator(device: device)
let vertexDescriptor = MDLVertexDescriptor()
let vertexLayout = MDLVertexBufferLayout()
vertexLayout.stride = sizeof(Vertex)
vertexDescriptor.layouts = [vertexLayout]
vertexDescriptor.attributes = [MDLVertexAttribute(name: MDLVertexAttributePosition, format: MDLVertexFormat.Float3, offset: 0, bufferIndex: 0),
MDLVertexAttribute(name: MDLVertexAttributeColor, format: MDLVertexFormat.Float4, offset: sizeof(float3), bufferIndex: 0),
MDLVertexAttribute(name: MDLVertexAttributeTextureCoordinate, format: MDLVertexFormat.Float2, offset: sizeof(float3)+sizeof(float4), bufferIndex: 0),
MDLVertexAttribute(name: MDLVertexAttributeNormal, format: MDLVertexFormat.Float3, offset: sizeof(float3)+sizeof(float4)+sizeof(float2), bufferIndex: 0)]
var error: NSError?
let asset = MDLAsset(URL: path, vertexDescriptor: vertexDescriptor, bufferAllocator: allocator, preserveTopology: true, error: &error)
if error != nil{
print(error)
return nil
}
let model = asset.objectAtIndex(0) as! MDLMesh
let mesh = try MTKMesh(mesh: model, device: device)
And here is my drawing method:
func render(commandQueue: MTLCommandQueue, pipelineState: MTLRenderPipelineState,drawable: CAMetalDrawable,projectionMatrix: float4x4,modelViewMatrix: float4x4, clearColor: MTLClearColor){
dispatch_semaphore_wait(bufferProvider.availibleResourcesSemaphore, DISPATCH_TIME_FOREVER)
let renderPassDescriptor = MTLRenderPassDescriptor()
renderPassDescriptor.colorAttachments[0].texture = drawable.texture
renderPassDescriptor.colorAttachments[0].loadAction = .Clear
renderPassDescriptor.colorAttachments[0].clearColor = clearColor
renderPassDescriptor.colorAttachments[0].storeAction = .Store
let commandBuffer = commandQueue.commandBuffer()
commandBuffer.addCompletedHandler { (buffer) in
dispatch_semaphore_signal(self.bufferProvider.availibleResourcesSemaphore)
}
let renderEncoder = commandBuffer.renderCommandEncoderWithDescriptor(renderPassDescriptor)
renderEncoder.setCullMode(MTLCullMode.None)
renderEncoder.setRenderPipelineState(pipelineState)
renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, atIndex: 0)
renderEncoder.setFragmentTexture(texture, atIndex: 0)
if let samplerState = samplerState{
renderEncoder.setFragmentSamplerState(samplerState, atIndex: 0)
}
var nodeModelMatrix = self.modelMatrix()
nodeModelMatrix.multiplyLeft(modelViewMatrix)
uniformBuffer = bufferProvider.nextUniformsBuffer(projectionMatrix, modelViewMatrix: nodeModelMatrix, light: light)
renderEncoder.setVertexBuffer(self.uniformBuffer, offset: 0, atIndex: 1)
renderEncoder.setFragmentBuffer(uniformBuffer, offset: 0, atIndex: 1)
if indexBuffer != nil{
renderEncoder.drawIndexedPrimitives(.Triangle, indexCount: self.indexCount, indexType: self.indexType, indexBuffer: self.indexBuffer!, indexBufferOffset: 0)
}else{
renderEncoder.drawPrimitives(.Triangle, vertexStart: 0, vertexCount: vertexCount, instanceCount: vertexCount/3)
}
renderEncoder.endEncoding()
commandBuffer.presentDrawable(drawable)
commandBuffer.commit()
}
Here is my vertex shader:
struct VertexIn {
    packed_float3 position;
    packed_float4 color;
    packed_float2 texCoord;
    packed_float3 normal;
};

struct VertexOut {
    float4 position [[position]];
    float3 fragmentPosition;
    float4 color;
    float2 texCoord;
    float3 normal;
};

struct Light {
    packed_float3 color;
    float ambientIntensity;
    packed_float3 direction;
    float diffuseIntensity;
    float shininess;
    float specularIntensity;
};

struct Uniforms {
    float4x4 modelMatrix;
    float4x4 projectionMatrix;
    Light light;
};
vertex VertexOut basic_vertex(
const device VertexIn* vertex_array [[ buffer(0) ]],
const device Uniforms& uniforms [[ buffer(1) ]],
unsigned int vid [[ vertex_id ]]) {
float4x4 mv_Matrix = uniforms.modelMatrix;
float4x4 proj_Matrix = uniforms.projectionMatrix;
VertexIn VertexIn = vertex_array[vid];
VertexOut VertexOut;
VertexOut.position = proj_Matrix * mv_Matrix * float4(VertexIn.position,1);
VertexOut.fragmentPosition = (mv_Matrix * float4(VertexIn.position,1)).xyz;
VertexOut.color = VertexIn.color;
VertexOut.texCoord = VertexIn.texCoord;
VertexOut.normal = (mv_Matrix * float4(VertexIn.normal, 0.0)).xyz;
return VertexOut;
}
And here is how it looks:
link
Actually, I have another class, written completely by me, for loading models. It works fine, but the problem is that it does not use indexing, so if I try to load models more complex than a low-poly sphere, the GPU crashes... Anyway, I tried to modify it to use indexing and got the same result. Then I added hardcoded indices for testing and got a really weird result: with 3 indices it drew a triangle, with 3 more it drew the same triangle, and after 3 more it drew 2 triangles...
Edit:
Here is my Vertex structure:
struct Vertex: Equatable {
    var x, y, z: Float
    var r, g, b, a: Float
    var s, t: Float
    var nX, nY, nZ: Float

    func floatBuffer() -> [Float] {
        return [x, y, z, r, g, b, a, s, t, nX, nY, nZ]
    }
}

I see a couple of potential issues here.
1) Your vertex descriptor does not map exactly to your Vertex struct. The position variables (x, y, z) occupy 12 bytes, so the color variables start at an offset of 12 bytes. This matches the packed_float3 position field in your shader's VertexIn struct, but in the vertex descriptor you provide to Model I/O, you use sizeof(float3), which is 16, as the offset of the color attribute. Because the position field is packed, you should use sizeof(Float) * 3 for this offset instead, and likewise for the subsequent offsets. I suspect this is the main cause of your problems.
More generally, it's a good idea to use strideof rather than sizeof to account for alignment, though by chance it wouldn't make a difference here.
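For example, the offsets could be accumulated from the packed field sizes of Vertex. A minimal sketch in the question's Swift 2-era syntax, reusing its vertexDescriptor and vertexLayout:
// Offsets accumulated from the packed Vertex fields (3 + 4 + 2 + 3 Floats = 48 bytes)
let colorOffset    = sizeof(Float) * 3                   // 12
let texCoordOffset = colorOffset + sizeof(Float) * 4     // 28
let normalOffset   = texCoordOffset + sizeof(Float) * 2  // 36
vertexDescriptor.attributes = [
    MDLVertexAttribute(name: MDLVertexAttributePosition, format: MDLVertexFormat.Float3, offset: 0, bufferIndex: 0),
    MDLVertexAttribute(name: MDLVertexAttributeColor, format: MDLVertexFormat.Float4, offset: colorOffset, bufferIndex: 0),
    MDLVertexAttribute(name: MDLVertexAttributeTextureCoordinate, format: MDLVertexFormat.Float2, offset: texCoordOffset, bufferIndex: 0),
    MDLVertexAttribute(name: MDLVertexAttributeNormal, format: MDLVertexFormat.Float3, offset: normalOffset, bufferIndex: 0)]
vertexLayout.stride = normalOffset + sizeof(Float) * 3   // 48, which matches sizeof(Vertex)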
2) Model I/O is allowed to use a single MTLBuffer to store both vertices and indices, so you should use the offset member of each MTKMeshBuffer when setting the vertex buffer or specifying the index buffer in each draw call, rather than assuming the offsets to be 0.
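Concretely, that means pulling the vertex buffer and index buffer (and their offsets) out of the MTKMesh instead of assuming offset 0. A rough sketch against the mesh created in the loading code above, using the same Swift 2-era API as the question:
// Bind the mesh's vertex buffer at whatever offset Model I/O chose for it
let meshVertexBuffer = mesh.vertexBuffers[0]
renderEncoder.setVertexBuffer(meshVertexBuffer.buffer, offset: meshVertexBuffer.offset, atIndex: 0)
// Each submesh carries its own index buffer, offset, count and type
for submesh in mesh.submeshes {
    renderEncoder.drawIndexedPrimitives(submesh.primitiveType,
        indexCount: submesh.indexCount,
        indexType: submesh.indexType,
        indexBuffer: submesh.indexBuffer.buffer,
        indexBufferOffset: submesh.indexBuffer.offset)
}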

Related

Converting an MLMultiArray to an image or an OpenGL / Metal texture

I'm trying to do background segmentation of a live video using CoreML. I used DeepLabV3 as provided by Apple. The model works OK, even though it already takes 100 ms to process a 513x513 image. I then want to display the output, which is a 513x513 array of int32. Converting it to an image as done in CoreMLHelpers takes 300 ms, and I'm looking for a much faster way to display the results. I was thinking that maybe it'd be faster to somehow dump this into an OpenGL or Metal texture.
What is the best way to handle MLMultiArray for live inputs?
My answer is based on processing the MLMultiArray in Metal.
Create an MTLBuffer:
let device = MTLCreateSystemDefaultDevice()!
let segmentationMaskBuffer = device.makeBuffer(length: segmentationHeight * segmentationWidth * MemoryLayout<Int32>.stride, options: [])!
Copy MLMultiArray to MTLBuffer:
memcpy(segmentationMaskBuffer.contents(), mlOutput.semanticPredictions.dataPointer, segmentationMaskBuffer.length)
Set up the Metal-related variables:
let commandQueue = device.makeCommandQueue()!
let library = device.makeDefaultLibrary()!
let function = library.makeFunction(name: "binaryMask")!
let computePipeline = try! device.makeComputePipelineState(function: function)
Create a struct for the segmentation size:
let segmentationWidth = 513
let segmentationHeight = 513
struct MixParams {
    var width: Int32 = Int32(segmentationWidth)
    var height: Int32 = Int32(segmentationHeight)
}
Create an output texture:
// width/height are assumed to be the desired output size (e.g. the view or camera resolution)
let textureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm, width: width, height: height, mipmapped: false)
textureDescriptor.usage = [.shaderRead, .shaderWrite]
let outputTexture = device.makeTexture(descriptor: textureDescriptor)!
Pass the MTLBuffer and output texture to the kernel function:
let buffer = commandQueue.makeCommandBuffer()!
let maskCommandEncoder = buffer.makeComputeCommandEncoder()!
maskCommandEncoder.setTexture(outputTexture, index: 1)
maskCommandEncoder.setBuffer(segmentationMaskBuffer, offset: 0, index: 0)
var params = MixParams()   // the struct defined above
maskCommandEncoder.setBytes(&params, length: MemoryLayout<MixParams>.size, index: 1)
let w = computePipeline.threadExecutionWidth
let h = computePipeline.maxTotalThreadsPerThreadgroup / w
let threadGroupSize = MTLSizeMake(w, h, 1)
let threadGroups = MTLSizeMake(
    (outputTexture.width + threadGroupSize.width - 1) / threadGroupSize.width,
    (outputTexture.height + threadGroupSize.height - 1) / threadGroupSize.height, 1)
maskCommandEncoder.setComputePipelineState(computePipeline)
maskCommandEncoder.dispatchThreadgroups(threadGroups, threadsPerThreadgroup: threadGroupSize)
maskCommandEncoder.endEncoding()
buffer.commit()
buffer.waitUntilCompleted()   // make sure the mask has been written before reading it below
Write your kernel function in the Shaders.metal file:
#include <metal_stdlib>
using namespace metal;
#include <CoreImage/CoreImage.h>

struct MixParams {
    int segmentationWidth;
    int segmentationHeight;
};

static inline int get_class(float2 pos, int width, int height, device int* mask) {
    const int x = int(pos.x * width);
    const int y = int(pos.y * height);
    return mask[y * width + x];
}

static float get_person_probability(float2 pos, int width, int height, device int* mask) {
    return get_class(pos, width, height, mask) == 15;
}

kernel void binaryMask(
    texture2d<float, access::write> outputTexture [[texture(1)]],
    device int* segmentationMask [[buffer(0)]],
    constant MixParams& params [[buffer(1)]],
    uint2 gid [[thread_position_in_grid]])
{
    float width = outputTexture.get_width();
    float height = outputTexture.get_height();
    if (gid.x >= width || gid.y >= height) return;

    const float2 pos = float2(float(gid.x) / width,
                              float(gid.y) / height);
    const float is_person = get_person_probability(pos, params.segmentationWidth,
                                                   params.segmentationHeight,
                                                   segmentationMask);
    float4 outPixel;
    if (is_person < 0.5f) {
        outPixel = float4(0.0, 0.0, 0.0, 0.0);
    } else {
        outPixel = float4(1.0, 1.0, 1.0, 1.0);
    }
    outputTexture.write(outPixel, gid);
}
Finally, get the CIImage from the output texture after the command buffer has completed:
let kciOptions: [CIImageOption: Any] = [CIImageOption.colorSpace: CGColorSpaceCreateDeviceRGB()]
let maskImage = CIImage(mtlTexture: outputTexture, options: kciOptions)!.oriented(.downMirrored)
Instead of outputting an MLMultiArray you can change the model to make it output an image of type CVPixelBuffer. Then you can use CVMetalTextureCacheCreateTextureFromImage to turn the pixel buffer into an MTLTexture. (I think this works but I don't recall if I ever tried it. Not all pixel buffer objects can be turned into textures and I'm not sure if Core ML outputs a CVPixelBuffer object with the "Metal compatibility flag" turned on.)
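A rough sketch of that pixel-buffer route, assuming the converted model outputs a Metal-compatible CVPixelBuffer called pixelBuffer (the .r8Unorm pixel format below is an assumption and has to match the pixel buffer's actual format):
import Metal
import CoreVideo

let device = MTLCreateSystemDefaultDevice()!
var textureCache: CVMetalTextureCache?
CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, device, nil, &textureCache)

var cvTexture: CVMetalTexture?
CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache!, pixelBuffer, nil,
    .r8Unorm,                               // must match the pixel buffer's format
    CVPixelBufferGetWidth(pixelBuffer),
    CVPixelBufferGetHeight(pixelBuffer),
    0, &cvTexture)
let maskTexture = CVMetalTextureGetTexture(cvTexture!)   // MTLTexture? backed by the pixel buffer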
Alternatively, you can write a compute kernel that takes in the MLMultiArray and converts it to a texture, which then gets drawn into a Metal view. This has the advantage that you can apply all kinds of effects to the segmentation map in the compute kernel at the same time.

Metal vertex shader draw points of a Texture

I want to execute a Metal (or OpenGL ES 3.0) shader that draws a points primitive with blending. To do that, I need to pass all the pixel coordinates of the texture to the vertex shader as vertices, which computes the position of each vertex to be passed to the fragment shader. The fragment shader simply outputs the color for the point with blending enabled. My problem is whether there is an efficient way to pass the coordinates of the vertices to the vertex shader, since there would be too many vertices for a 1920x1080 image, and that needs to be done 30 times a second. (In a compute shader we would use the dispatchThreadgroups command, but a compute shader cannot draw geometry with blending enabled.)
EDIT: This is what I did -
let vertexFunctionRed = library!.makeFunction(name: "vertexShaderHistogramBlenderRed")
let fragmentFunctionAccumulator = library!.makeFunction(name: "fragmentShaderHistogramAccumulator")
let renderPipelineDescriptorRed = MTLRenderPipelineDescriptor()
renderPipelineDescriptorRed.vertexFunction = vertexFunctionRed
renderPipelineDescriptorRed.fragmentFunction = fragmentFunctionAccumulator
renderPipelineDescriptorRed.colorAttachments[0].pixelFormat = .bgra8Unorm
renderPipelineDescriptorRed.colorAttachments[0].isBlendingEnabled = true
renderPipelineDescriptorRed.colorAttachments[0].rgbBlendOperation = .add
renderPipelineDescriptorRed.colorAttachments[0].sourceRGBBlendFactor = .one
renderPipelineDescriptorRed.colorAttachments[0].destinationRGBBlendFactor = .one
do {
histogramPipelineRed = try device.makeRenderPipelineState(descriptor: renderPipelineDescriptorRed)
} catch {
print("Unable to compile render pipeline state Histogram Red!")
return
}
Drawing code:
let commandBuffer = commandQueue?.makeCommandBuffer()
let renderEncoder = commandBuffer?.makeRenderCommandEncoder(descriptor: renderPassDescriptor!)
renderEncoder?.setRenderPipelineState(histogramPipelineRed!)
renderEncoder?.setVertexTexture(metalTexture, index: 0)
renderEncoder?.drawPrimitives(type: .point, vertexStart: 0, vertexCount: 1, instanceCount: metalTexture!.width*metalTexture!.height)
renderEncoder?.drawPrimitives(type: .point, vertexStart: 0, vertexCount: metalTexture!.width*metalTexture!.height, instanceCount: 1)
and Shaders:
vertex MappedVertex vertexShaderHistogramBlenderRed (texture2d<float, access::sample> inputTexture [[ texture(0) ]],
unsigned int vertexId [[vertex_id]])
{
MappedVertex out;
constexpr sampler s(s_address::clamp_to_edge, t_address::clamp_to_edge, min_filter::linear, mag_filter::linear, coord::pixel);
ushort width = inputTexture.get_width();
ushort height = inputTexture.get_height();
float X = (vertexId % width)/(1.0*width);
float Y = (vertexId/width)/(1.0*height);
int red = inputTexture.sample(s, float2(X,Y)).r;
out.position = float4(-1.0 + (red * 0.0078125), 0.0, 0.0, 1.0);
out.pointSize = 1.0;
out.colorFactor = half3(1.0, 0.0, 0.0);
return out;
}
fragment half4 fragmentShaderHistogramAccumulator ( MappedVertex in [[ stage_in ]]
)
{
half3 colorFactor = in.colorFactor;
return half4(colorFactor*(1.0/256.0), 1.0);
}
Maybe you can draw a single point instanced 1920x1080 times. Something like:
vertex float4 my_func(texture2d<float, access::read> image [[texture(0)]],
constant uint &width [[buffer(0)]],
uint instance_id [[instance_id]])
{
// decompose the instance ID to a position
uint2 pos = uint2(instance_id % width, instance_id / width);
return float4(image.read(pos).r * 255, 0, 0, 1);
}
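On the CPU side, that instanced approach would be a single-vertex point draw with width*height instances, passing the width as a constant. A rough sketch reusing the encoder and texture names from the question:
var width = UInt32(metalTexture!.width)
renderEncoder?.setVertexTexture(metalTexture, index: 0)
renderEncoder?.setVertexBytes(&width, length: MemoryLayout<UInt32>.stride, index: 0)
renderEncoder?.drawPrimitives(type: .point, vertexStart: 0, vertexCount: 1,
                              instanceCount: metalTexture!.width * metalTexture!.height)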

How to pass an array of float3 values from CPU to GPU in Metal?

I have a requirement to pass color values to the shader as an array. Right now I'm passing them as a structure RGBColors and receiving them as a structure, and it is working fine.
But I want to receive it as a float3 value in the shader. As soon as I change it to float3, it acts weird and starts to flicker; it is not giving me the proper color.
Here is the code I used to set the fragment buffer:
func setFragmentBuffer(_ values: [Float], at index: Int) {
let bufferValues = values
let datasize = 16 * values.count / 3
let colorBuffer = device.makeBuffer(bytes: bufferValues, length: datasize, options: [])
renderEncoder.setFragmentBuffer(colorBuffer, offset: 0, at: index)
}
Here is the structure for the RGBColors
struct RGBColors {
    var r: Float
    var g: Float
    var b: Float

    func floatBuffers() -> [Float] {
        return [r, g, b]
    }
}
From this structure I will create an array of Float values and set it to the fragment buffer.
The code let datasize = 16 * values.count / 3 uses the value 16 because, although the float data type in C++ is 4 bytes, float3 in simd is 16 bytes.
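You can confirm that padding directly in a playground (float3 here is simd's SIMD3<Float>):
import simd

MemoryLayout<Float>.stride      // 4
MemoryLayout<float3>.size       // 16 - a float3 is padded out to the size of a float4
MemoryLayout<float3>.stride     // 16
MemoryLayout<float3>.alignment  // 16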
And in the shader I'm implementing the method
fragment float4
singleShader(RasterizerData in [[stage_in]],
texture2d<half> sourceTexture [[ texture(0) ]],
const device float3 &rgbColor [[ buffer(1) ]])
{
constexpr sampler textureSampler (mag_filter::linear,
min_filter::linear);
// Sample the texture and return the color to colorSample
const half4 colorSample = sourceTexture.sample (textureSampler, in.textureCoordinate);
float4 outputColor;
float red = colorSample.r * rgbColor.r;
float green = colorSample.g * rgbColor.g;
float blue = colorSample.b * rgbColor.b;
outputColor = float4(red, green,blue, colorSample.a);
outputColor = float4((outputColor.rgb * param1 + param2) / 4, colorSample.a);
return outputColor;
}
Finally, I'm not getting the right output color.
How do I match the simd float3 data type to the Swift Float data type? Can someone suggest a way?
Edit:
I found a solution for how to create an MTLBuffer from float3, and here is the code:
func setFragmentBuffer(_ values: [float3], at index: Int) {
var valueBuffer = values
let bufferCreated = device.makeBuffer(length: MemoryLayout.size(ofValue: valueBuffer[0]) * 2 * valueBuffer.count , options: [])
let bufferPointer = bufferCreated.contents()
memcpy(bufferPointer, &valueBuffer, 16 * valueBuffer.count)
renderEncoder.setFragmentBuffer(bufferCreated, offset: 0, at: index)
}
This code works perfectly fine. But still, in the line let bufferCreated = device.makeBuffer(length: MemoryLayout.size(ofValue: valueBuffer[0]) * 2 * valueBuffer.count, options: []) you can see that the length has to be multiplied by a factor of 2 to make the code work.
Why does there have to be a multiplier to make it work? I'm not understanding this. Could someone explain?
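For what it's worth, sizing the buffer from the type's stride (16 bytes per float3) is a more direct way to express the same layout than a hand-tuned multiplier. A minimal sketch under the same assumptions as the code above (same device, renderEncoder and the older setFragmentBuffer(_:offset:at:) API):
func setFragmentBuffer(_ values: [float3], at index: Int) {
    // Each float3 occupies MemoryLayout<float3>.stride = 16 bytes in the array
    let length = MemoryLayout<float3>.stride * values.count
    let colorBuffer = device.makeBuffer(bytes: values, length: length, options: [])
    renderEncoder.setFragmentBuffer(colorBuffer, offset: 0, at: index)
}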

Programmatically writing a texture and reading it in fragment shader

I've written a compute shader that outputs to a texture. The coordinate system of the output texture is in pixels. I then have a basic vertex and fragment shader that should simply sample that value and return what I thought would be normalised coordinates. However, the mapping between my programmatically drawn texture and the vertices of my rendering surface doesn't match up the way I expected.
The Compute Function
Can be summarized as
texture.write(color, uint2(x, y));
where x and y are integer pixel locations.
The Vertex Data
// position.x, position.y, texCoords.x, texCoords.y
let vertexData = [Float](arrayLiteral:
-1, 1, 0, 0,
-1, -1, 0, 1,
1, -1, 1, 1,
1, -1, 1, 1,
1, 1, 1, 0,
-1, 1, 0, 0)
The Metal Shader
typedef struct {
    packed_float2 position;
    packed_float2 texCoords;
} VertexIn;

typedef struct {
    float4 position [[ position ]];
    float2 texCoords;
} FragmentVertex;
vertex FragmentVertex simple_vertex(device VertexIn *vertexArray [[ buffer(0) ]],
uint vertexIndex [[ vertex_id ]])
{
VertexIn in = vertexArray[vertexIndex];
FragmentVertex out;
out.position = float4(in.position, 0.f, 1.f);
out.texCoords = in.texCoords;
return out;
}
fragment float4 simple_fragment(FragmentVertex in [[ stage_in ]],
texture2d<uint, access::sample> inputTexture [[ texture(0) ]],
sampler linearSampler [[ sampler(0) ]])
{
const uint2 imageSizeInPixels = uint2(360, 230);
float imageSizeInPixelsWidth = imageSizeInPixels.x;
float imageSizeInPixelsHeight = imageSizeInPixels.y;
float2 coords = float2(in.position.x / 360.f, in.position.y / 230.f);
float color = inputTexture.sample(linearSampler, in.texCoords).x / 255.f;
return float4(float3(color), 1.f);
}
The Sampler
let samplerDescriptor = MTLSamplerDescriptor()
samplerDescriptor.normalizedCoordinates = true
samplerDescriptor.minFilter = .linear
samplerDescriptor.magFilter = .linear
samplerDescriptor.sAddressMode = .clampToZero
samplerDescriptor.rAddressMode = .clampToZero
self.samplerState = self.metalDevice?.makeSamplerState(descriptor: samplerDescriptor)
In this experiment the only value that seems to work is coords, based upon the normalized in.position value. in.texCoords seems to always be zero. Shouldn't the texCoords and position received by the vertex and fragment shaders be in the range of values defined in the vertex data?
My Vertex Buffer was right, but wrong
In the process of converting Obj-C code to Swift, I failed to copy the vertex data completely.
The Correct Copy
let byteCount = vertexData.count * MemoryLayout<Float>.size
let vertexBuffer = self.metalDevice?.makeBuffer(bytes: vertexData, length: byteCount, options: options)
The Source of my Woes
let vertexBuffer = self.metalDevice?.makeBuffer(bytes: vertexData, length: vertexData.count, options: options)
The Complete Vertex Buffer Creation
// Vertex data for a full-screen quad. The first two numbers in each row represent
// the x, y position of the point in normalized coordinates. The second two numbers
// represent the texture coordinates for the corresponding position.
let vertexData = [Float](arrayLiteral:
-1, 1, 0, 0,
-1, -1, 0, 1,
1, -1, 1, 1,
1, -1, 1, 1,
1, 1, 1, 0,
-1, 1, 0, 0)
// Create a buffer to hold the static vertex data
let options = MTLResourceOptions().union(.storageModeShared)
let byteCount = vertexData.count * MemoryLayout<Float>.size
let vertexBuffer = self.metalDevice?.makeBuffer(bytes: vertexData, length: byteCount, options: options)
vertexBuffer?.label = "Image Quad Vertices"
self.vertexBuffer = vertexBuffer

Direct3D11, some part of the model always in front of the others, probably about depth

I'm a new D3D programmer.
When I tried to render a model, I got a strange problem.
As you can see in the picture, some parts of the model are always drawn in front of the others.
The model vertex only contains the following data:
{
    float x, y, z;
    float r, g, b;
    float u, v;
}
I tried to render it in OpenGL and WebGL (http://nalol.azurewebsites.net/) and it works well, but in D3D11 I get this strange problem.
I tried Google and found something about depth, but I don't know how to deal with it.
The following are some parts of my code:
HLSL file
struct vout
{
    float4 position : SV_POSITION;
    float3 normal : NORMAL;
    float2 texcoord : TEXCOORD;
};

vout vshader(float3 position : POSITION, float3 normals : NORMAL, float2 texcoords : TEXCOORD)
{
    vout output;
    output.position = float4(position, 1);
    output.normal = normals;
    output.texcoord = texcoords;
    return output;
}

Texture2D shaderTexture;
SamplerState SampleType;

float3 pshader(float3 position : POSITION, float3 normals : NORMAL, float2 texcoords : TEXCOORD) : SV_TARGET
{
    return shaderTexture.Sample(SampleType, texcoords);
}
vertex struct
struct lol_skn_vertex {
    float position[3];
    char bone_index[4];     // for bones and animation, not used here
    float bone_weights[4];  // for bones and animation, not used here
    float normals[3];
    float texcoords[2];
};
input layout object
D3D11_INPUT_ELEMENT_DESC ied[] =
{
    {"POSITION", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
    {"NORMAL", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 32, D3D11_INPUT_PER_VERTEX_DATA, 0},
    {"TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 44, D3D11_INPUT_PER_VERTEX_DATA, 0},
};
render function
void RenderFrame(void)
{
FLOAT ColorRGBA[4] = {0.0f, 0.2f, 0.4f, 1.0f};
d3d11_device_context->ClearRenderTargetView(d3d11_view_rt_backbuffer, ColorRGBA);
d3d11_device_context->ClearDepthStencilView(d3d11_view_ds,D3D11_CLEAR_DEPTH|D3D11_CLEAR_STENCIL,1.f,0);
update();
UINT stride = sizeof(lol_skn_vertex);
UINT offset = 0;
d3d11_device_context->IASetVertexBuffers(0, 1, &vertex_buffer, &stride, &offset);
d3d11_device_context->IASetIndexBuffer(index_buffer, DXGI_FORMAT_R16_UINT, 0);
d3d11_device_context->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3d11_device_context->DrawIndexed(skn.num_indices, 0, 0);
// switch the back buffer and the front buffer
dxgi_swapchain->Present(0, 0);
}
buffer update function
void update() {
// copy the vertices into the buffer
D3D11_MAPPED_SUBRESOURCE ms;
d3d11_device_context->Map(vertex_buffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
memcpy(ms.pData, skn_vertex_buffer, sizeof(lol_skn_vertex) * skn.num_vertices); // copy the data
// unmap the buffer
SYSTEMTIME SystemTime;
GetSystemTime(&SystemTime);
float angle = (float)SystemTime.wMilliseconds/1000+SystemTime.wSecond;
D3DXMATRIX x;
D3DXMatrixRotationY(&x, angle);
D3DXVec4TransformArray((D3DXVECTOR4 *)ms.pData, sizeof(lol_skn_vertex), (D3DXVECTOR4 *)ms.pData, sizeof(lol_skn_vertex), &x, skn.num_vertices);
// use D3DXVECTOR4 for Transform
d3d11_device_context->Unmap(vertex_buffer, NULL);
}
At last I solved the problem.
I made 2 very stupid mistakes.
First: in the input layout object, I used DXGI_FORMAT_R32G32_FLOAT for position, which only contains x and y, so the shader always got 0 for z.
Second: my model data is not normalized (it ranges from -50 to 50), so I had used D3D11_RASTERIZER_DESC to disable DepthClip, and I forgot about it.
Fixing these 2 problems made everything work.
And great thanks to Gnietschow :)
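For reference, the corrected input layout implied by the first fix would use a three-component format for the 12-byte position. The NORMAL entry is shown with three components as well to match the float3 in the shader, although the answer only calls out POSITION. A sketch:
D3D11_INPUT_ELEMENT_DESC ied[] =
{
    {"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0,  0, D3D11_INPUT_PER_VERTEX_DATA, 0}, // float x, y, z
    {"NORMAL",   0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 32, D3D11_INPUT_PER_VERTEX_DATA, 0}, // float normals[3]
    {"TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT,    0, 44, D3D11_INPUT_PER_VERTEX_DATA, 0}, // float texcoords[2]
};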
