DirectX 11 Drawing Circles

I am trying to draw points with varying thickness. I was using the triangle method, but the resulting circles had jagged edges, and I had to abandon that approach because of performance issues and the visual-quality goals of my app. I saw the pixel-shader method in this video. My problem with that method is that my point thickness values are not in pixels; they are in meters. Do I have to convert back to world coordinates in the pixel shader to measure the distance from the center? Or is there another approach that avoids the coordinate conversion?
My attempt:
Topology
dxmanager->Context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
Vertex Shader
struct VOut
{
float4 position : SV_POSITION;
float4 color : COLOR;
float size : RSIZE;
};
VOut main(float3 position : POSITION, float4 color : COLOR, float size: RSIZE)
{
VOut output;
output.position = float4(position, 1);
output.color = color;
output.size = size;
return output;
}
Pixel Shader
float4 main(float4 position : SV_POSITION, float4 color : COLOR, float4 centre : POSITION, float max : RSIZE) : SV_TARGET
{
if (distance(position,centre)<max) // Always False
return color;
return float4(0, 0, 0, 0);
}
Geometry Shader
[maxvertexcount(6)]
void main(point VOut input[1], inout TriangleStream<GOut> OutputStream)
{
GOut P1;
P1.pos = float4(input[0].position.x-input[0].size/2, input[0].position.y - input[0].size/2, input[0].position.z,1);
P1.color = float4(0.6f, 0.4f, 0.8f, 0.5f);
P1.pos = mul(world, P1.pos);
P1.centre = mul(world, input[0].position);
P1.max = input[0].size * scale;
OutputStream.Append(P1);
GOut P2;
P2.pos = float4(input[0].position.x - input[0].size / 2, input[0].position.y + input[0].size / 2, input[0].position.z, 1);
P2.color = float4(0.6f, 0.4f, 0.8f, 0.5f);
P2.pos = mul(world, P2.pos);
P2.centre = mul(world, input[0].position);
P2.max = input[0].size*scale;
OutputStream.Append(P2);
GOut P3;
P3.pos = float4(input[0].position.x + input[0].size / 2, input[0].position.y - input[0].size / 2, input[0].position.z, 1);
P3.color = float4(0.6f, 0.4f, 0.8f, 0.5f);
P3.pos = mul(world, P3.pos);
P3.centre = mul(world, input[0].position);
P3.max = input[0].size * scale;
OutputStream.Append(P3);
GOut P4;
P4.pos = float4(input[0].position.x + input[0].size / 2, input[0].position.y + input[0].size / 2, input[0].position.z, 1);
P4.color = float4(0.6f, 0.4f, 0.8f, 0.5f);
P4.pos = mul(world, P4.pos);
P4.centre = mul(world, input[0].position);
P4.max = input[0].size * scale;
OutputStream.Append(P4);
GOut P5;
P5.pos = float4(input[0].position.x + input[0].size / 2, input[0].position.y - input[0].size / 2, input[0].position.z, 1);
P5.color = float4(0.6f, 0.4f, 0.8f, 0.5f);
P5.pos = mul(world, P5.pos);
P5.centre = mul(world, input[0].position);
P5.max = input[0].size * scale;
OutputStream.Append(P5);
GOut P6;
P6.pos = float4(input[0].position.x - input[0].size / 2, input[0].position.y - input[0].size / 2, input[0].position.z, 1);
P6.color = float4(0.6f, 0.4f, 0.8f, 0.5f);
P6.pos = mul(world, P6.pos);
P6.centre = mul(world, input[0].position);
P6.max = input[0].size * scale;
OutputStream.Append(P6);
}
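A note on the test marked "Always False": in the pixel shader, position (SV_POSITION) arrives in viewport pixel coordinates, while centre and max were produced in world space, so the comparison mixes spaces and units. One way to avoid converting pixels back to world coordinates is to have the geometry shader pass along a local offset from the point centre, expressed in the same units as the radius (meters); the pixel shader then compares the interpolated offset length against the radius directly. The following is only a rough sketch under assumptions, not a drop-in replacement: the GOut layout, the cbuffer names (world, viewProj), and the screen-facing quad orientation are all invented here, and a real implementation would billboard the quad toward the camera if the points do not lie in a view-aligned plane.
cbuffer PerObject
{
    float4x4 world;    // assumed: the same matrix the original geometry shader uses
    float4x4 viewProj; // assumed: combined view * projection
};
struct GOut
{
    float4 pos    : SV_POSITION;
    float4 color  : COLOR;
    float2 offset : TEXCOORD0; // offset from the point centre, in meters
    float  radius : RSIZE;     // half of the input size, in meters
};
[maxvertexcount(4)]
void GSMain(point VOut input[1], inout TriangleStream<GOut> stream)
{
    // Quad corners emitted as a 4-vertex triangle strip instead of 6 separate vertices.
    const float2 corners[4] = { float2(-1, -1), float2(-1, 1), float2(1, -1), float2(1, 1) };
    float r = input[0].size * 0.5f;
    float4 centreWS = mul(world, float4(input[0].position.xyz, 1));
    [unroll]
    for (int i = 0; i < 4; ++i)
    {
        GOut o;
        o.offset = corners[i] * r;   // stays in meters; interpolated by the rasterizer
        o.radius = r;
        o.color  = input[0].color;
        // Assumes the points lie in a screen-facing plane; substitute camera
        // right/up axes here to billboard the quad if they do not.
        float4 posWS = centreWS + float4(o.offset, 0, 0);
        o.pos = mul(viewProj, posWS);
        stream.Append(o);
    }
}
float4 PSMain(GOut input) : SV_TARGET
{
    // Both sides of the comparison are in meters, so no pixel-to-world conversion is needed.
    if (length(input.offset) > input.radius)
        discard;
    return input.color;
}
If edge quality is the goal, the hard discard can also be replaced by a smoothstep of length(input.offset) against input.radius to get an anti-aliased rim at no extra geometry cost.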

Related

iOS: Metal, CoreImage and GL Shaders

I would like to switch the camera I’m working on, from GL (GPUImage) to Metal.
I tried to figure out what may be wrong with the Metal shader I'm using to apply a lookup filter to an MTLTexture, but I have some banding issues (blue lines), especially when displaying dark gradients.
These blue banding issues appear when applying the metal shader, not when using CoreImage.
The rest of the image is just fine.
Here is an example using a lookup image to produce high contrast (the source image is exactly the same for both):
Left: CoreImage shader; Right: Metal shader
As you can see, Metal seems to be less precise than CoreImage and produces these strange blue bands.
I'd like to know if this is a known issue, considering both shaders are adaptations of Brad Larson's GPUImage work.
The Metal Shader:
kernel void LookupFilterKernel(texture2d<float, access::read> sourceTexture [[texture(0)]],
texture2d<float, access::write> outTexture [[texture(1)]],
texture2d<float, access::read> lutTexture [[texture(2)]],
uint2 gid [[thread_position_in_grid]]){
float4 color = sourceTexture.read(gid);
float blueColor = color.b * 63.0;
float2 quad1;
quad1.y = floor(floor(blueColor) / 8.0);
quad1.x = floor(blueColor) - (quad1.y * 8.0);
float2 quad2;
quad2.y = floor(ceil(blueColor) / 8.0);
quad2.x = ceil(blueColor) - (quad2.y * 8.0);
float2 texPos1;
texPos1.x = (quad1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color.r);
texPos1.y = (quad1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color.g);
float2 texPos2;
texPos2.x = (quad2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color.r);
texPos2.y = (quad2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color.g);
float4 newColor1 = lutTexture.read(uint2(texPos1.x * 512 , texPos2.y * 512));
float4 newColor2 = lutTexture.read(uint2(texPos2.x * 512, texPos2.y * 512 ));
float4 newColor = mix(newColor1, newColor2, fract(blueColor));
float4 finalColor = mix(color, float4(newColor.rgb, color.w), 1.0);
outTexture.write(finalColor, gid);
}
And the CoreImage Shader:
kernel vec4 LookupFilterKernel(sampler inputImage, sampler inputLUT, float intensity) {
vec4 textureColor = sample(inputImage,samplerCoord(inputImage));
textureColor = clamp(textureColor, vec4(0.0), vec4(1.0));
float blueColor = textureColor.b * 63.0;
vec2 quad1;
quad1.y = floor(floor(blueColor) / 8.0);
quad1.x = floor(blueColor) - (quad1.y * 8.0);
vec2 quad2;
quad2.y = floor(ceil(blueColor) / 8.0);
quad2.x = ceil(blueColor) - (quad2.y * 8.0);
vec2 texPos1;
texPos1.x = (quad1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.r);
texPos1.y = (quad1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.g);
vec2 texPos2;
texPos2.x = (quad2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.r);
texPos2.y = (quad2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * textureColor.g);
texPos1.y = 1.0 - texPos1.y;
texPos2.y = 1.0 - texPos2.y;
vec4 inputLUTExtent = samplerExtent(inputLUT);
vec4 newColor1 = sample(inputLUT, samplerTransform(inputLUT, texPos1 * vec2(512.0) + inputLUTExtent.xy));
vec4 newColor2 = sample(inputLUT, samplerTransform(inputLUT, texPos2 * vec2(512.0) + inputLUTExtent.xy));
vec4 newColor = mix(newColor1, newColor2, fract(blueColor));
return mix(textureColor, vec4(newColor.rgb, textureColor.a), intensity);
}
The source texture is an MTLTexture I'm receiving from the camera's captureOutput function.
If you have any idea why I have this problem, or if you need more code or explanations, please let me know.
Many thanks!
EDIT 1:
OK, so after Frank's answer I tried to fix these gamma issues: I linearized the LUT texture and the source texture, updating the shader:
static float linearTosRGB(float c) {
return powr(c, 1./2.2);
}
static float srgbToLinear(float c) {
if (c <= 0.04045)
return c / 12.92;
else
return powr((c + 0.055) / 1.055, 2.4);
}
kernel void LookupFilterKernel(texture2d<float, access::read> sourceTexture [[texture(0)]],
texture2d<float, access::write> outTexture [[texture(1)]],
texture2d<float, access::read> lutTexture [[texture(2)]],
uint2 gid [[thread_position_in_grid]]){
float4 color = sourceTexture.read(gid);
color.r = srgbToLinear(color.r);
color.g = srgbToLinear(color.g);
color.b = srgbToLinear(color.b);
float blueColor = color.b * 63.0;
float2 quad1;
quad1.y = floor(floor(blueColor) / 8.0);
quad1.x = floor(blueColor) - (quad1.y * 8.0);
float2 quad2;
quad2.y = floor(ceil(blueColor) / 8.0);
quad2.x = ceil(blueColor) - (quad2.y * 8.0);
float2 texPos1;
texPos1.x = (quad1.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color.r);
texPos1.y = (quad1.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color.g);
float2 texPos2;
texPos2.x = (quad2.x * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color.r);
texPos2.y = (quad2.y * 0.125) + 0.5/512.0 + ((0.125 - 1.0/512.0) * color.g);
float4 newColor1 = lutTexture.read(uint2(texPos1.x * 512 , texPos2.y * 512));
float4 newColor2 = lutTexture.read(uint2(texPos2.x * 512, texPos2.y * 512 ));
float4 newColor = mix(newColor1, newColor2, fract(blueColor));
float4 finalColor = mix(color, float4(newColor.rgb, color.w), 1.0);
finalColor.r = linearTosRGB(finalColor.r);
finalColor.g = linearTosRGB(finalColor.g);
finalColor.b = linearTosRGB(finalColor.b);
outTexture.write(finalColor, gid);
}
The LUT texture is previously transformed into linear gamma with this shader:
kernel void sRGBToLinearKernel(texture2d<float, access::read> sourceTexture [[texture(0)]],
texture2d<float, access::write> outTexture [[texture(1)]],
uint2 gid [[thread_position_in_grid]]){
float4 color = sourceTexture.read(gid);
color.r = srgbToLinear(color.r);
color.g = srgbToLinear(color.g);
color.b = srgbToLinear(color.b);
outTexture.write(color, gid);
}
And... it didn't fix anything. I still have this strange banding. Maybe I'm doing something wrong?
EDIT 2:
I tried setting the pixel format to MTLPixelFormatRGBA16Float, but the result is still wrong.
I realized that if I changed this line in the shader:
float4 newColor = mix(newColor1, newColor2, fract(blueColor));
to:
float4 newColor = mix(newColor1, newColor2, 1.0);
the blue bands disappear. I'm assuming that the calculation of quad1 isn't right and produces these bands.
So my question is: why does this occur only in the Metal shader and not in the CoreImage or GPUImage ones?

DirectX Intel HD and NVIDIA different behavior Geometry Shader

My code uses a geometry shader to produce thick lines using this: https://forum.libcinder.org/topic/smooth-thick-lines-using-geometry-shader
(Uses geometry shader approach)
I got it to work on my local machine using an Intel HD Graphics card.
However, if I use the same settings on my destination machine, the lines are drawn with weird gaps.
I don't understand why, because it works on different Intel HD devices.
Note that my target is an NVS 300, which is fairly old but supports FL10_1 and, I believe, geometry shaders. The Intel devices I tried it with might be a bit newer.
Since I force the feature level to 10_1 at device creation during development, I expect no difference.
I don't see any error codes in the output that could hint at the erratic behavior and explain it, even with native code debugging or remote debugging set up.
Does anyone have a clue why this behaves differently?
I could add images, but basically you see a thick sine curve on my local machine, and a thick but fragmented curve with gaps on the target.
Thanks in advance for any clues.
cbuffer constBuffer
{
float THICKNESS;
float2 WIN_SCALE;
};
struct PSInput
{
float4 Position : SV_POSITION;
};
float2 toScreenSpace(float4 vertex)
{
//float2 WIN_SCALE = { 100.0f, 100.0f };
return float2(vertex.xy) * WIN_SCALE;
}
[maxvertexcount(7)]
void main(lineadj float4 vertices[4] : SV_POSITION, inout TriangleStream<PSInput> triStream)
{
//float2 WIN_SCALE = { 100.0f, 100.0f };
float2 p0 = toScreenSpace(vertices[0]); // start of previous segment
float2 p1 = toScreenSpace(vertices[1]); // end of previous segment, start of current segment
float2 p2 = toScreenSpace(vertices[2]); // end of current segment, start of next segment
float2 p3 = toScreenSpace(vertices[3]); // end of next segment
// perform naive culling
float2 area = WIN_SCALE * 1.2;
if (p1.x < -area.x || p1.x > area.x)
return;
if (p1.y < -area.y || p1.y > area.y)
return;
if (p2.x < -area.x || p2.x > area.x)
return;
if (p2.y < -area.y || p2.y > area.y)
return;
float2 v0 = normalize(p1 - p0);
float2 v1 = normalize(p2 - p1);
float2 v2 = normalize(p3 - p2);
// determine the normal of each of the 3 segments (previous, current, next)
float2 n0 = { -v0.y, v0.x};
float2 n1 = { -v1.y, v1.x};
float2 n2 = { -v2.y, v2.x};
// determine miter lines by averaging the normals of the 2 segments
float2 miter_a = normalize(n0 + n1); // miter at start of current segment
float2 miter_b = normalize(n1 + n2); // miter at end of current segment
// determine the length of the miter by projecting it onto normal and then inverse it
//float THICKNESS = 10;
float length_a = THICKNESS / dot(miter_a, n1);
float length_b = THICKNESS / dot(miter_b, n1);
float MITER_LIMIT = -1;
//float MITER_LIMIT = -1;
//float MITER_LIMIT = 1;
PSInput v;
float2 temp;
//// prevent excessively long miters at sharp corners
if (dot(v0, v1) < -MITER_LIMIT)
{
miter_a = n1;
length_a = THICKNESS;
// close the gap
if (dot(v0, n1) > 0)
{
temp = (p1 + THICKNESS * n0) / WIN_SCALE;
v.Position = float4(temp, 0, 1.0);
triStream.Append(v);
temp = (p1 + THICKNESS * n1) / WIN_SCALE;
v.Position = float4(temp, 0, 1.0);
triStream.Append(v);
v.Position = float4(p1 / WIN_SCALE, 0, 1.0);
triStream.Append(v);
triStream.RestartStrip();
}
else
{
temp = (p1 - THICKNESS * n1) / WIN_SCALE;
v.Position = float4(temp, 0, 1.0);
triStream.Append(v);
temp = (p1 - THICKNESS * n0) / WIN_SCALE;
v.Position = float4(temp, 0, 1.0);
triStream.Append(v);
v.Position = float4(p1 / WIN_SCALE, 0, 1.0);
triStream.Append(v);
triStream.RestartStrip();
}
}
if (dot(v1, v2) < -MITER_LIMIT)
{
miter_b = n1;
length_b = THICKNESS;
}
// generate the triangle strip
temp = (p1 + length_a * miter_a) / WIN_SCALE;
v.Position = float4(temp, 0, 1.0);
triStream.Append(v);
temp = (p1 - length_a * miter_a) / WIN_SCALE;
v.Position = float4(temp, 0, 1.0);
triStream.Append(v);
temp = (p2 + length_b * miter_b) / WIN_SCALE;
v.Position = float4(temp, 0, 1.0);
triStream.Append(v);
temp = (p2 - length_b * miter_b) / WIN_SCALE;
v.Position = float4(temp, 0, 1.0);
triStream.Append(v);
triStream.RestartStrip();
}

HLSL Geometry Shader Thick Lines DirectX

I am trying to draw a thick-lined sine curve using this approach: https://forum.libcinder.org/topic/smooth-thick-lines-using-geometry-shader
I tried to port it to an HLSL geometry shader.
The dimensions are fixed at 500/500 for now.
Changing THICKNESS does not seem to affect the issue.
struct PSInput
{
float4 Position : SV_POSITION;
};
float2 toScreenSpace(float4 vertex)
{
float2 WIN_SCALE = { 500.0f, 500.0f };
return float2(vertex.xy / vertex.w) * WIN_SCALE;
}
[maxvertexcount(7)]
void main(lineadj float4 vertices[4] : SV_POSITION, inout TriangleStream<PSInput> triStream)
{
float2 WIN_SCALE = { 500.0f, 500.0f };
float2 p0 = toScreenSpace(vertices[0]); // start of previous segment
float2 p1 = toScreenSpace(vertices[1]); // end of previous segment, start of current segment
float2 p2 = toScreenSpace(vertices[2]); // end of current segment, start of next segment
float2 p3 = toScreenSpace(vertices[3]); // end of next segment
// perform naive culling
float2 area = WIN_SCALE * 1.2;
if (p1.x < -area.x || p1.x > area.x)
return;
if (p1.y < -area.y || p1.y > area.y)
return;
if (p2.x < -area.x || p2.x > area.x)
return;
if (p2.y < -area.y || p2.y > area.y)
return;
float2 v0 = normalize(p1 - p0);
float2 v1 = normalize(p2 - p1);
float2 v2 = normalize(p3 - p2);
// determine the normal of each of the 3 segments (previous, current, next)
float2 n0 = { -v0.y, v0.x};
float2 n1 = { -v1.y, v1.x};
float2 n2 = { -v2.y, v2.x};
// determine miter lines by averaging the normals of the 2 segments
float2 miter_a = normalize(n0 + n1); // miter at start of current segment
float2 miter_b = normalize(n1 + n2); // miter at end of current segment
// determine the length of the miter by projecting it onto normal and then inverse it
float THICKNESS = 1;
float length_a = THICKNESS / dot(miter_a, n1);
float length_b = THICKNESS / dot(miter_b, n1);
float MITER_LIMIT = 0.75;
//float MITER_LIMIT = -1;
//float MITER_LIMIT = 0.1;
PSInput v;
float2 temp;
//// prevent excessively long miters at sharp corners
if (dot(v0, v1) < -MITER_LIMIT)
{
miter_a = n1;
length_a = THICKNESS;
// close the gap
if (dot(v0, n1) > 0)
{
temp = (p1 + THICKNESS * n0) / WIN_SCALE;
v.Position = float4(temp, 1.0, 1.0);
triStream.Append(v);
temp = (p1 + THICKNESS * n1) / WIN_SCALE;
v.Position = float4(temp, 1.0, 1.0);
triStream.Append(v);
v.Position = float4(p1 / WIN_SCALE, 1.0, 1.0);
triStream.Append(v);
//triStream.RestartStrip();
}
else
{
temp = (p1 - THICKNESS * n1) / WIN_SCALE;
v.Position = float4(temp, 1.0, 1.0);
triStream.Append(v);
temp = (p1 - THICKNESS * n0) / WIN_SCALE;
v.Position = float4(temp, 1.0, 1.0);
triStream.Append(v);
v.Position = float4(p1 / WIN_SCALE, 1.0, 1.0);
triStream.Append(v);
//triStream.RestartStrip();
}
}
if (dot(v1, v2) < -MITER_LIMIT)
{
miter_b = n1;
length_b = THICKNESS;
}
// generate the triangle strip
temp = (p1 + length_a * miter_a) / WIN_SCALE;
v.Position = float4(temp, 1.0, 1.0);
triStream.Append(v);
temp = (p1 - length_a * miter_a) / WIN_SCALE;
v.Position = float4(temp, 1.0, 1.0);
triStream.Append(v);
temp = (p2 + length_b * miter_b) / WIN_SCALE;
v.Position = float4(temp, 1.0, 1.0);
triStream.Append(v);
temp = (p2 - length_b * miter_b) / WIN_SCALE;
v.Position = float4(temp, 1.0, 1.0);
triStream.Append(v);
//triStream.RestartStrip();
}
The thing is, it won't output anything for the curve whose points I feed into it. It already works very well without thick lines, but I want them. If I add noise to my sine signal (the y-values increase by a small random percentage), the curve suddenly shows up, is thicker, and seems to work, but I want it to be thick and visible without noise too.
If I increase the frequency without noise, the points appear scatter-plot style, not as a connected sine curve.
If I debug with the VS Graphics Debugger, I can see that in the frame without noise it reports that the pixel shader stage was not run.
The frame with noise shows it running and working.
Maybe my port is incorrect and I am forgetting something; I am very new to programming shaders. Maybe I misunderstand the approach entirely.
Any help appreciated.
I needed to set the rasterizer state's cull mode to CullMode.None to get all the triangles drawn (presumably because the winding order of the generated triangles can flip, so back-face culling was discarding some of them).

OpenGL ES on iOS Adding Vertices to Buffers

In my app I am using OpenGL ES to render a file downloaded from the internet that is then parsed into a vertex array, so I have to supply vertex and normal data after launch. I am new to OpenGL ES, but I have been reading and learning. I have set up a vertex buffer and a normals buffer that seem to be working fine, but I think I am putting the vertex and normal data into the buffers incorrectly, because when I load the view there is an object that vaguely resembles the shape I want, but with triangles veering off in various directions and parts of the shape missing. Here is my code for putting my data into the buffers:
for (int i = 0; i < triangle_cnt; i++) {
int base = i * 18;
GLfloat x1 = vertices[base];
GLfloat y1 = vertices[base + 1];
GLfloat z1 = vertices[base + 2];
GLfloat x2 = vertices[base + 6];
GLfloat y2 = vertices[base + 7];
GLfloat z2 = vertices[base + 8];
GLfloat x3 = vertices[base + 12];
GLfloat y3 = vertices[base + 13];
GLfloat z3 = vertices[base + 14];
vector_t normal;
vector_t U;
vector_t V;
GLfloat length;
U.x = x2 - x1;
U.y = y2 - y1;
U.z = z2 - z1;
V.x = x3 - x1;
V.y = y3 - y1;
V.z = z3 - z1;
normal.x = U.y * V.z - U.z * V.y;
normal.y = U.z * V.x - U.x * V.z;
normal.z = U.x * V.y - U.y * V.x;
length = normal.x * normal.x + normal.y * normal.y + normal.z * normal.z;
length = sqrt(length);
base = i * 9;
verticesBuff[base] = x1;
verticesBuff[base + 1] = y1;
verticesBuff[base + 2] = z1;
normalsBuff[base] = normal.x;
normalsBuff[base + 1] = normal.y;
normalsBuff[base + 2] = normal.z;
verticesBuff[base + 3] = x2;
verticesBuff[base + 4] = y2;
verticesBuff[base + 5] = z2;
normalsBuff[base + 3] = normal.x;
normalsBuff[base + 4] = normal.y;
normalsBuff[base + 5] = normal.z;
verticesBuff[base + 6] = x3;
verticesBuff[base + 7] = y3;
verticesBuff[base + 8] = z3;
normalsBuff[base + 6] = normal.x;
normalsBuff[base + 7] = normal.y;
normalsBuff[base + 8] = normal.z;
fprintf(stderr, "%ff, %ff, %ff, %ff, %ff, %ff, \n", x1, y1, z1, normal.x, normal.y, normal.z);
fprintf(stderr, "%ff, %ff, %ff, %ff, %ff, %ff, \n", x2, y2, z2, normal.x, normal.y, normal.z);
fprintf(stderr, "%ff, %ff, %ff, %ff, %ff, %ff, \n", x3, y3, z3, normal.x, normal.y, normal.z);
}
And here is the code that uses those buffers:
- (void)setupGL {
[EAGLContext setCurrentContext:self.context];
[self loadShaders];
self.effect = [[GLKBaseEffect alloc] init];
self.effect.light0.enabled = GL_TRUE;
self.effect.light0.diffuseColor = GLKVector4Make(.05f, .55f, 1.0f, 1.0f);
glEnable(GL_DEPTH_TEST);
glGenVertexArraysOES(1, &_vertexArray);
glBindVertexArrayOES(_vertexArray);
glGenBuffers(1, &_vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, vertCount*sizeof(verticesBuff)*3*2, NULL, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(verticesBuff) * vertCount * 3, verticesBuff);
glBufferData(GL_ARRAY_BUFFER, vertCount*sizeof(normalsBuff)*3*2, NULL, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, sizeof(GLfloat) * vertCount * 3, sizeof(normalsBuff) * vertCount * 3, normalsBuff);
glEnableVertexAttribArray(GLKVertexAttribPosition);
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 24, BUFFER_OFFSET(0));
glEnableVertexAttribArray(GLKVertexAttribNormal);
glVertexAttribPointer(GLKVertexAttribNormal, 3, GL_FLOAT, GL_FALSE, 24, BUFFER_OFFSET(12));
glBindVertexArrayOES(0);
_rotMatrix = GLKMatrix4Identity;
_quat = GLKQuaternionMake(0, 0, 0, 1);
_quatStart = GLKQuaternionMake(0, 0, 0, 1);
}
- (void)glkView:(GLKView *)view drawInRect:(CGRect)rect {
glClearColor(0.78f, 0.78f, 0.78f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindVertexArrayOES(_vertexArray);
// Render the object with GLKit
[self.effect prepareToDraw];
glVertexPointer(3, GL_FLOAT, 0, verticesBuff);
glNormalPointer(GL_FLOAT, 0, normalsBuff);
glDrawArrays(GL_TRIANGLES, 0, vertCount); //*******************************
// Render the object again with ES2
glUseProgram(_program);
glUniformMatrix4fv(uniforms[UNIFORM_MODELVIEWPROJECTION_MATRIX], 1, 0, _modelViewProjectionMatrix.m);
glUniformMatrix3fv(uniforms[UNIFORM_NORMAL_MATRIX], 1, 0, _normalMatrix.m);
glDrawArrays(GL_TRIANGLES, 0, vertCount);
}
If I take those logs and paste them into the vertex array of a sample app built from the code Apple supplies when creating an OpenGL ES app, the object renders beautifully, so I have deduced that I must just be putting the vertex data in wrong.
So can someone help me understand what I am doing wrong when entering the vertices and normals? Any help is appreciated.
Also this is what my render looks like:
And this is, in shape at least, what it should look like:
It's a bit messy and there might be quite a few problems. The one you are currently facing would seem to be:
glVertexAttribPointer(GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 24, BUFFER_OFFSET(0));
The stride parameter (24) should be sizeof(GLfloat)*3, and the same goes for the normals. This is because you are no longer using an interleaved vertex structure; your position coordinates are now tightly packed (that being said, you can also pass 0 as the stride now). The result of this bug is that only every second vertex is taken and the others are dropped, and once the positions are exhausted it starts drawing the normals. Also, with the normal pointer and offset set as they are, you are reading beyond the buffer, which should either crash or read from some other resource.

Jagged ccDrawCircle

Using Cocos2d to draw a thicker circle:
glLineWidth(20);
ccDrawCircle(self.ripplePosition, _radius, 0, 50, NO);
But this is what shows up (notice how it looks like it is made of 4 different segments):
http://i.stack.imgur.com/jYW4s.png
I tried increasing the number of segments to larger values but the result is the same.
Is this a bug in Cocos2D? Any ideas on how to achieve a "perfect" circle?
Here is the implementation of ccDrawCircle from cocos2d 2.0rc2:
void ccDrawCircle( CGPoint center, float r, float a, NSUInteger segs, BOOL drawLineToCenter)
{
lazy_init();
int additionalSegment = 1;
if (drawLineToCenter)
additionalSegment++;
const float coef = 2.0f * (float)M_PI/segs;
GLfloat *vertices = calloc( sizeof(GLfloat)*2*(segs+2), 1);
if( ! vertices )
return;
for(NSUInteger i = 0;i <= segs; i++) {
float rads = i*coef;
GLfloat j = r * cosf(rads + a) + center.x;
GLfloat k = r * sinf(rads + a) + center.y;
vertices[i*2] = j;
vertices[i*2+1] = k;
}
vertices[(segs+1)*2] = center.x;
vertices[(segs+1)*2+1] = center.y;
[shader_ use];
[shader_ setUniformForModelViewProjectionMatrix];
[shader_ setUniformLocation:colorLocation_ with4fv:(GLfloat*) &color_.r count:1];
ccGLEnableVertexAttribs( kCCVertexAttribFlag_Position );
glVertexAttribPointer(kCCVertexAttrib_Position, 2, GL_FLOAT, GL_FALSE, 0, vertices);
glDrawArrays(GL_LINE_STRIP, 0, (GLsizei) segs+additionalSegment);
free( vertices );
CC_INCREMENT_GL_DRAWS(1);
}
I went with a slightly modified version of ccDrawCircle and it works pretty well (performs a lot better than using and resizing a sprite):
void ccDrawDonut( CGPoint center, float r1, float r2, NSUInteger segs)
{
lazy_init();
const float coef = 2.0f * (float)M_PI/segs;
GLfloat *vertices = calloc( sizeof(GLfloat)*4*(segs+1), 1); // (segs+1) outer/inner vertex pairs, 2 floats each
if( ! vertices )
return;
for(NSUInteger i = 0;i <= segs; i++) {
float rads = i*coef;
GLfloat j1 = r1 * cosf(rads) + center.x;
GLfloat k1 = r1 * sinf(rads) + center.y;
vertices[i*4] = j1;
vertices[i*4+1] = k1;
rads+= coef/2;
GLfloat j2 = r2 * cosf(rads) + center.x;
GLfloat k2 = r2 * sinf(rads) + center.y;
vertices[i*4+2] = j2;
vertices[i*4+3] = k2;
}
[shader_ use];
[shader_ setUniformForModelViewProjectionMatrix];
[shader_ setUniformLocation:colorLocation_ with4fv:(GLfloat*) &color_.r count:1];
ccGLEnableVertexAttribs( kCCVertexAttribFlag_Position );
glVertexAttribPointer(kCCVertexAttrib_Position, 2, GL_FLOAT, GL_FALSE, 0, vertices);
glDrawArrays(GL_TRIANGLE_STRIP, 0, (GLsizei) 2*segs+2);
free( vertices );
CC_INCREMENT_GL_DRAWS(1);
}
