DirectX Recalculate Interior/Exterior Implementation - directx

For my game, I have added "Brushes" which are 3D boxes that will be used to create level geometry. I wanted to add the ability to flip the brushes inside out, so the exterior becomes the interior, etc. I'm pretty sure I know how to do this, but I wanted to make sure this is actually standard practice. First of all, here's a picture of my BrushVolume in engine:
I generate this BrushVolume via the following code:
/// <summary>
/// Rebuilds this brush volume's vertex buffer from the current dimensions,
/// face-visibility flags and diffuse material. Each visible face emits six
/// vertices (two triangles, non-indexed), wound clockwise per the engine's
/// front-face convention. Any previously created GPU buffer is disposed.
/// </summary>
public void Rebuild()
{
/**
* No faces are present; empty the StaticBrushVolume
*/
if (this.FaceVisibility == BrushVolumeFaceVisibility.None)
{
if (_bufferBinding.Buffer != null)
{
_bufferBinding.Buffer.Dispose();
}
// NOTE(review): _bufferBinding still references the disposed buffer at
// this point; consider resetting the binding so it cannot be reused.
this.FaceCount = 0;
_drawCount = 0;
return;
}
this.FaceCount = CountVisibleFaces(this.FaceVisibility);
// Six vertices per visible face: two triangles, no index buffer.
Vertex[] vertices = new Vertex[this.FaceCount * 6];
float width = this.Width;
float height = this.Height;
float depth = this.Depth;
// UV tiling factors: texture coordinates scale with the brush dimensions
// divided by the material dimensions, so the texture repeats at its
// native density instead of stretching with the brush.
float uvX = width / (float)this.DiffuseMaterial.Width;
float uvY = height / (float)this.DiffuseMaterial.Height;
float uvZ = depth / (float)this.DiffuseMaterial.Width;
// Generate Front Face (normal -Z)
int index = 0;
if (this.FaceVisibility.HasFlag(BrushVolumeFaceVisibility.Front))
{
Vector3 normal = new Vector3(0, 0, -1);
vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(uvX, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, height, -depth), new Vector2(0, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, -depth), new Vector2(0, uvY), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(uvX, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, -depth), new Vector2(uvX, uvY), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, -depth), new Vector2(0, uvY), normal);
}
// Generate East Face (normal +X).
// NOTE(review): the flag tested here is named Left while the normal is +X;
// confirm the flag naming matches the intended face.
if (this.FaceVisibility.HasFlag(BrushVolumeFaceVisibility.Left))
{
Vector3 normal = new Vector3(1, 0, 0);
vertices[index++] = new Vertex(new Vector3(width, -height, depth), new Vector2(0, uvY), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, -depth), new Vector2(uvZ, uvY), normal);
vertices[index++] = new Vertex(new Vector3(width, height, -depth), new Vector2(uvZ, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, height, -depth), new Vector2(uvZ, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, height, depth), new Vector2(0, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, depth), new Vector2(0, uvY), normal);
}
// Generate South Face (normal +Z)
if (this.FaceVisibility.HasFlag(BrushVolumeFaceVisibility.Back))
{
Vector3 normal = new Vector3(0, 0, 1);
vertices[index++] = new Vertex(new Vector3(width, height, depth), new Vector2(uvX, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, depth), new Vector2(0, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, depth), new Vector2(0, uvY), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, depth), new Vector2(0, uvY), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, depth), new Vector2(uvX, uvY), normal);
vertices[index++] = new Vertex(new Vector3(width, height, depth), new Vector2(uvX, 0), normal);
}
// Generate West Face (normal -X); flag is named Right — same naming
// concern as the East face above.
if (this.FaceVisibility.HasFlag(BrushVolumeFaceVisibility.Right))
{
Vector3 normal = new Vector3(-1, 0, 0);
// Clockwise
vertices[index++] = new Vertex(new Vector3(-width, height, depth), new Vector2(uvZ, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, depth), new Vector2(uvZ, uvY), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, -depth), new Vector2(0, uvY), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, -depth), new Vector2(0, uvY), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(0, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, depth), new Vector2(uvZ, 0), normal);
// Counter Clockwise — kept as the author's note: reversing the winding
// order is what flips this face from exterior to interior.
//vertices[index++] = new Vertex(new Vector3(-width, -height, -depth), new Vector2(0, uvY), normal);
//vertices[index++] = new Vertex(new Vector3(-width, -height, depth), new Vector2(uvZ, uvY), normal);
//vertices[index++] = new Vertex(new Vector3(-width, height, depth), new Vector2(uvZ, 0), normal);
//vertices[index++] = new Vertex(new Vector3(-width, height, depth), new Vector2(uvZ, 0), normal);
//vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(0, 0), normal);
//vertices[index++] = new Vertex(new Vector3(-width, -height, -depth), new Vector2(0, uvY), normal);
}
// Generate Top Face (normal +Y)
if (this.FaceVisibility.HasFlag(BrushVolumeFaceVisibility.Top))
{
Vector3 normal = new Vector3(0, 1, 0);
vertices[index++] = new Vertex(new Vector3(width, height, depth), new Vector2(uvX, uvZ), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, depth), new Vector2(0, uvZ), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(0, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(0, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, height, -depth), new Vector2(uvX, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, height, depth), new Vector2(uvX, uvZ), normal);
}
// Generate Bottom Face (normal -Y)
if (this.FaceVisibility.HasFlag(BrushVolumeFaceVisibility.Bottom))
{
Vector3 normal = new Vector3(0, -1, 0);
vertices[index++] = new Vertex(new Vector3(-width, -height, depth), new Vector2(uvX, uvZ), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, depth), new Vector2(0, uvZ), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, -depth), new Vector2(0, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, depth), new Vector2(uvX, uvZ), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, -depth), new Vector2(uvX, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, -depth), new Vector2(0, 0), normal);
}
// Release the previous GPU buffer (if any) before replacing the binding.
if (this._bufferBinding.Buffer != null)
{
this._bufferBinding.Buffer.Dispose();
}
this._drawCount = vertices.Length;
Buffer vertexBuffer = Buffer.Create(Device, BindFlags.VertexBuffer, vertices);
this._bufferBinding = new VertexBufferBinding(vertexBuffer, 32, 0); // 32 is Vertex Size In Bytes (Vector3 + Vector2 + Vector3 = 8 floats)
}
I left a note in there showing my current thinking on how to flip a face from interior to exterior. Quite simply, I was thinking to just invert the winding order. My engine uses clockwise winding by default, so if I were to invert the winding for each face to be counter clockwise, it would appear as an interior in engine. This works, too. However, and it may be a stupid question. But is this standard? I tried Googling how Blender implements "Recalculate Inside", but I can't find anything online discussing the actual implementation details.
Does anyone know if the standard practice for flipping a face from exterior to interior is as simple as reversing the winding order?

I'm fairly certain that inverting the winding order is how this is achieved. I also had to invert the texture coordinates and normals so that the textures displayed correctly. Here is a gif showing the result of my implementation:
In that gif I'm just hitting a key to cycle through the "Hull Mode" of the brushvolume.
The source for this example is located on my pastebin:
Here is a brief example of how I invert from exterior to interior, etc.
// Front-face generation with hull-mode support. For the Interior mode the
// vertex order is reversed (flipping the winding relative to the Exterior
// branch), the face normal is negated (+Z instead of -Z), and the U texture
// coordinates are mirrored so the texture reads correctly from inside.
if (this.FaceVisibility.HasFlag(BrushVolumeFaceVisibility.Front))
{
if (this.HullMode == BrushVolumeHullMode.Interior)
{
// Interior: normal points into the box (+Z for the front face).
Vector3 normal = new Vector3(0, 0, 1);
vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(0, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, height, -depth), new Vector2(uvX, 0), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, -depth), new Vector2(uvX, uvY), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, -depth), new Vector2(uvX, uvY), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, -depth), new Vector2(0, uvY), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(0, 0), normal);
}
else
{
// Exterior: normal points out of the box (-Z for the front face).
Vector3 normal = new Vector3(0, 0, -1);
vertices[index++] = new Vertex(new Vector3(width, -height, -depth), new Vector2(0, uvY), normal);
vertices[index++] = new Vertex(new Vector3(width, height, -depth), new Vector2(0, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(uvX, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, height, -depth), new Vector2(uvX, 0), normal);
vertices[index++] = new Vertex(new Vector3(-width, -height, -depth), new Vector2(uvX, uvY), normal);
vertices[index++] = new Vertex(new Vector3(width, -height, -depth), new Vector2(0, uvY), normal);
}
}
It should be noted that my implementation is not at all elegant, because I hard-coded the flipped state into an if branch depending upon the hull mode. Software like Blender likely has a far more sophisticated way of doing this, because it has to iterate over a much larger (and dynamic) set of triangles. There is likely a general algorithm for inverting the winding order and texture coordinates, but I have no idea what it could be, because there is no material on it online.
If anyone knows a better way to do this, I would really like to see it.

Related

what is the use and role of texSubImage2D?

After a lot of searching, I managed to get the texSubImage2D function to work. What I haven't found is what this function is actually for. In the example below I made a nice effect. In short, I know how to make it work, but I am still completely unaware of the role of its parameters. Where can I find these explanations?
I'm not looking for the syntax,
the example I give shows that I have (it seems to me) understood it well.
https://registry.khronos.org/webgl/specs/latest/1.0/#5.14.8
What I don't understand at all is the semantics...
Anyway, if someone could answer with examples so that I can understand.
"use strict";
let canvas = document.getElementById("canvas");
let gl = canvas.getContext("webgl");
gl.canvas.width = 30;
gl.canvas.height = 30;
let vertex = `
attribute vec2 a_position;
attribute vec2 a_texCoord;
uniform vec2 u_resolution;
varying vec2 v_texCoord;
void main() {
vec2 zeroToOne = a_position / u_resolution;
vec2 zeroToTwo = zeroToOne * 2.0;
vec2 clipSpace = zeroToTwo - 1.0;
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
v_texCoord = a_texCoord;
}
`;
let fragment = `
precision mediump float;
uniform sampler2D u_image;
varying vec2 v_texCoord;
void main() {
gl_FragColor = texture2D(u_image, v_texCoord);
gl_FragColor.rgb *= gl_FragColor.a;
}
`;
let shader = gl.createProgram();
const vertexShader = gl.createShader(gl.VERTEX_SHADER);
const fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(vertexShader, vertex);
gl.shaderSource(fragmentShader, fragment);
gl.compileShader(vertexShader);
gl.compileShader(fragmentShader);
gl.attachShader(shader, vertexShader);
gl.attachShader(shader, fragmentShader);
gl.linkProgram(shader);
let image_RGBA = new Image();
image_RGBA.src = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeBAMAAADJHrORAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAYUExURdUAAKPTdgCN09Aq0w4A09PS0dOoXwD//56WZMcAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAA8SURBVCjPYyAIBJEBNr4SAmDnG8MALr4LBODmh4IAPn5aWhp+fjkBPgH9BOwn4H4C/icQfgTCHx9gYAAArEg8b+0tf+EAAAAASUVORK5CYII=";
image_RGBA.onload = function() {
go(image_RGBA);
};
// Uploads the image into a WebGL texture (demonstrating both texImage2D and
// texSubImage2D), then draws a full-canvas textured quad with
// premultiplied-alpha blending.
function go(image) {
let width = image.width;
let height = image.height;
let positionLocation = gl.getAttribLocation(shader, "a_position");
let texcoordLocation = gl.getAttribLocation(shader, "a_texCoord");
// Quad covering the image in pixel coordinates (two triangles).
let positionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0, 0,
width, 0,
0, height,
0, height,
width, 0,
width, height
]), gl.STATIC_DRAW);
// Matching texture coordinates in [0, 1].
let texcoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
0.0, 1.0,
1.0, 0.0,
1.0, 1.0,
]), gl.STATIC_DRAW);
// Clamp + nearest filtering: exact texel sampling, no mipmaps required.
let texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
// Use a 2D canvas as a staging area to obtain the raw RGBA pixel bytes.
const canvas2D = document.getElementById('canvas2D');
canvas2D.width = 30;
canvas2D.height = 30;
const ctx = canvas2D.getContext('2d');
ctx.drawImage(image, 0, 0);
var imgData = ctx.getImageData(0, 0, width, height).data;
var ArrayBufferView = new Uint8Array(imgData.buffer);
// texImage2D allocates and fills the entire 30x30 level-0 image.
gl.texImage2D(
gl.TEXTURE_2D,
0,
gl.RGBA,
30,
30,
0,
gl.RGBA,
gl.UNSIGNED_BYTE,
ArrayBufferView
);
// texSubImage2D overwrites a sub-rectangle of the already-allocated image:
// here xoffset=0, yoffset=0, width=29, height=29. It never reallocates.
// NOTE(review): the source array is laid out as 30-pixel-wide rows, but a
// width of 29 makes GL consume tightly packed 29-pixel rows, so every row
// is shifted by one pixel — presumably the "effect" being observed.
gl.texSubImage2D(
gl.TEXTURE_2D,
0,
0,
0,
29,
29,
gl.RGBA,
gl.UNSIGNED_BYTE,
ArrayBufferView
);
let resolutionLocation = gl.getUniformLocation(shader, "u_resolution");
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clearColor(100 / 255, 200 / 255, 150 / 255, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.useProgram(shader);
// Wire up the position attribute: 2 floats per vertex, tightly packed.
gl.enableVertexAttribArray(positionLocation);
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
let size = 2;
let type = gl.FLOAT;
let normalize = false;
let stride = 0;
let offset = 0;
gl.vertexAttribPointer(positionLocation, size, type, normalize, stride, offset);
// Wire up the texcoord attribute the same way.
gl.enableVertexAttribArray(texcoordLocation);
gl.bindBuffer(gl.ARRAY_BUFFER, texcoordBuffer);
size = 2;
type = gl.FLOAT;
normalize = false;
stride = 0;
offset = 0;
gl.vertexAttribPointer(texcoordLocation, size, type, normalize, stride, offset);
gl.uniform2f(resolutionLocation, gl.canvas.width, gl.canvas.height);
// Premultiplied-alpha blending (the fragment shader multiplies rgb by a).
gl.enable(gl.BLEND);
gl.blendFunc(gl.ONE, gl.ONE_MINUS_SRC_ALPHA);
gl.drawArrays(gl.TRIANGLES, 0, 6);
}
/* Upscale the 30px WebGL canvas to 150px without smoothing. */
#canvas {
width: 150px;
height: 150px;
image-rendering: pixelated;
}
/* Same for the 2D staging canvas, shown alongside for comparison. */
#canvas2D {
width: 150px;
height: 150px;
image-rendering: pixelated;
}
<canvas id="canvas2D"></canvas><canvas id="canvas"></canvas>

WebGL render to texture with framebuffer

I am new in webgl and trying to learn it.
I am trying to learn framebuffer and rendering to texture but I am stuck.
What I am trying to do is to copy the colors (pixel data) of a texture to another texture by using framebuffer. I have some other things in mind like doing some calculations before rendering to texture etc. but will do that afterwards.
I have created two 2x2 textures, put some random colors in one of them, and bound the other to the framebuffer. But I am not getting the expected output.
Vertex shader
// Vertex shader: expands a texture coordinate in [0,1] into clip space
// [-1,1], flipping Y (texcoord y=0 maps to the top of the render target),
// and rasterizes each vertex as a single 1-pixel point.
precision mediump float;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main() {
// Convert texture coordinate to render coordinate (note the Y flip).
gl_Position = vec4(2.0 * a_texcoord.x - 1.0, 1.0 - 2.0 * a_texcoord.y, 0, 1);
gl_PointSize = 1.0;
v_texcoord = a_texcoord;
}
Fragment shader
// Fragment shader: straight copy — sample the source texture at the
// interpolated coordinate and write the color out unchanged.
precision mediump float;
uniform sampler2D u_texture;
varying vec2 v_texcoord;
void main() {
vec4 data = texture2D(u_texture, v_texcoord);
gl_FragColor = data;
}
Javascript
var canvas = document.getElementById("canvas");
// WebGL context
var gl = canvas.getContext("webgl") || canvas.getContext("experimental-webgl");
if (!gl) {
console.log("WebGL not supported");
}
// Set canvas dimensions
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
// Create program with shaders
var program = glUtils.createProgram(gl, 'vshader', 'fshader');
// Texture dimensions
var textureWidth = 2,
textureHeight = 2;
// Texture coordinates: one POINT per texel.
// NOTE(review): i (x) is the outer loop and j (y) the inner one, so points
// are emitted column-by-column, while readPixels() returns pixels
// row-by-row starting at the bottom row — this ordering difference is
// consistent with the reordered output shown in the question. Also the
// coordinates land on texel corners (i / textureWidth); centering them as
// (i + 0.5) / textureWidth would put each point squarely on its texel.
var coords = [];
for (var i = 0; i < textureWidth; ++i) {
for (var j = 0; j < textureHeight; ++j) {
coords.push(i / textureWidth, j / textureHeight);
}
}
// Random colors for texture
var d = [];
for (var i = 0; i < textureWidth * textureHeight; ++i) {
d.push(
Math.floor(Math.random() * 256),
Math.floor(Math.random() * 256),
Math.floor(Math.random() * 256),
Math.floor(Math.random() * 256)
);
}
// Source texture filled with the random colors (sampled by the shader).
var data = new Uint8Array(d);
var texture0 = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture0);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, textureWidth, textureHeight, 0, gl.RGBA, gl.UNSIGNED_BYTE, data);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.bindTexture(gl.TEXTURE_2D, null);
// Destination texture (empty), to be attached to the framebuffer.
var texture1 = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture1);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, textureWidth, textureHeight, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.bindTexture(gl.TEXTURE_2D, null);
// Framebuffer with texture1 as its color attachment.
var fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture1, 0);
// Program
gl.useProgram(program);
// Bind texture0 to texture unit 0 for sampling.
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, texture0);
gl.uniform1i(program.uniforms.u_texture, 0);
// Bind framebuffer so drawing targets texture1 instead of the canvas.
// NOTE(review): this framebufferTexture2D call repeats the one above and
// is redundant.
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture1, 0);
// Set coordinate array
sendCoords(program);
// Viewport sized to the 2x2 destination texture.
setupViewport(gl, textureWidth, textureHeight);
// Clear
gl.clearColor(0, 0, 0, 1);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Draw one point per texel.
gl.drawArrays(gl.POINTS, 0, textureWidth * textureHeight);
// Read back the framebuffer contents (bottom row first).
var pixels = new Uint8Array(textureWidth * textureHeight * 4);
gl.readPixels(0, 0, textureWidth, textureHeight, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
gl.deleteFramebuffer(fb);
console.log(pixels);
// Uploads the texture-coordinate array into a buffer and wires it to the
// a_texcoord attribute: 2 floats per vertex, tightly packed, offset 0.
function sendCoords(program) {
var coordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, coordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(coords), gl.STATIC_DRAW);
// NOTE(review): WebGL defines no gl.FALSE constant, so this argument
// evaluates to undefined (which is falsy); a plain false is the
// conventional value for the "normalize" parameter.
gl.vertexAttribPointer(
program.attribs.a_texcoord,
2,
gl.FLOAT,
gl.FALSE,
0,
0
);
gl.enableVertexAttribArray(program.attribs.a_texcoord);
}
// Viewport setup: maps clip space onto the given pixel rectangle of the
// current render target (here, the 2x2 framebuffer texture).
function setupViewport(gl, width, height) {
gl.viewport(0, 0, width, height);
}
I am expecting exact same data that I have in texture0 but not getting that in gl.readPixels(). Maybe I am doing wrong calculations.
Update:
Here is sample data to make my problem clear.
// Texture coordinates
[0, 0,
0, 0.5,
0.5, 0,
0.5, 0.5]
// Random colors to copy from
[197, 103, 13, 0,
199, 17, 0, 18,
61, 177, 5, 14,
15, 72, 18, 10]
// Data getting by gl.readPixels()
[61, 177, 5, 14,
15, 72, 18, 10,
197, 103, 13, 0,
199, 17, 0, 18]
As you can see, the order is not same. I would like to know why.
Any help would be much appreciated. Thanks.

Flipping character

I would like to flip my character when it walks left/right. I created the character from different body parts, so flipping each of them caused this:
the reason of course was because it flipped the body parts in their own position, but not all the player together.
after that I had an idea and it was to draw the player to render target and flip the rendertarget when drawing, it worked (kind of), but when I walked when flipped it walked backwards and it also flipped the player position on the screen. here is the code:
// Draw the pre-rendered character render target, flipped horizontally when
// the sprite is marked as flipped.
// NOTE(review): the same rectangle `rec` is passed as both the destination
// and the source rectangle — confirm that is intentional.
if(mLeavusSprite.isflipped==0)
spriteBatch.Draw(character, rec,rec, Color.White,0,Vector2.Zero,SpriteEffects.None,0);
else
spriteBatch.Draw(character, rec, rec, Color.White, 0, Vector2.Zero, SpriteEffects.FlipHorizontally, 0);
character=render target that the player was drawn to.
Is there anything I can do? Flipping manually is going to be a serious pain: I would need to manually adjust over 10 animations with 4+ frames each, twice!
edit:
here is the code for drawing:
// Frame 0 pose: place each body part at a fixed offset from the head,
// select its sprite-sheet source rectangle, then draw back-to-front.
if (Frame == 0)
{
HeadPosition.X = Position.X;
HeadPosition.Y = Position.Y;
// Fixed offsets tuned for the unflipped pose; they are not mirrored when
// FlipIs is set, which is why each part flips in place on its own.
BodyPosition.X = HeadPosition.X + 8;
BodyPosition.Y = HeadPosition.Y + 32;
TopHandPosition.X = HeadPosition.X + 2;
TopHandPosition.Y = HeadPosition.Y + 36;
BackHandPosition.X = HeadPosition.X + 20;
BackHandPosition.Y = HeadPosition.Y + 36;
// Source rectangles within each part's texture.
HeadSource = new Rectangle(0, 0, this.Head.Width, this.Head.Height);
BodySource = new Rectangle(0, 0, 24, 54);
TopHandSource = new Rectangle(0, 0, 10, 27);
BackHandSource = new Rectangle(0, 0, 15, 27);
// Draw order: back hand, body, head, top hand (back to front).
theSpriteBatch.Draw(BackHand, BackHandPosition, BackHandSource,
Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
theSpriteBatch.Draw(Body, BodyPosition, BodySource,
Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
theSpriteBatch.Draw(Head, HeadPosition, HeadSource,
Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
theSpriteBatch.Draw(TopHand, TopHandPosition, TopHandSource,
Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
}
Edit 2:
// Attempted fix: compute the horizontal extent of all parts and re-base the
// flipped offsets around its center.
if (Frame == 0)
{
HeadPosition.X = Position.X;
HeadPosition.Y = Position.Y;
BodyPosition.X = HeadPosition.X + 8 ;
BodyPosition.Y = HeadPosition.Y + 32;
TopHandPosition.X = HeadPosition.X + 2 ;
TopHandPosition.Y = HeadPosition.Y + 36;
BackHandPosition.X = HeadPosition.X + 20 ;
BackHandPosition.Y = HeadPosition.Y + 36;
HeadSource = new Rectangle(0, 0, this.Head.Width, this.Head.Height);
BodySource = new Rectangle(0, 0, 24, 54);
TopHandSource = new Rectangle(0, 0, 10, 27);
BackHandSource = new Rectangle(0, 0, 15, 27);
// NOTE(review): bigx and smallx are never used.
int bigx=0;
int smallx=0;
// Leftmost X of any part, and rightmost X including each part's width.
float[] numbers = new[] { HeadPosition.X, BodyPosition.X , TopHandPosition.X, BackHandPosition.X};
float min = numbers.Min();
numbers = new[] { HeadPosition.X+HeadSource.Width, BodyPosition.X + BodySource.Width, TopHandPosition.X + TopHandSource.Width, BackHandPosition.X + BackHandSource.Width };
float max = numbers.Max();
// Half the total width of the composed sprite (a half-extent, not a
// position).
float center = (max - min) / 2;
if (flip==1)
{
// NOTE(review): adding the half-width plus a negated offset shifts the
// whole body rather than mirroring each part around a fixed axis —
// verify this against the intended pivot.
HeadPosition.X = Position.X;
BodyPosition.X = HeadPosition.X +center+ 8*flipOffset;
TopHandPosition.X = HeadPosition.X +center+ 2*flipOffset;
BackHandPosition.X = HeadPosition.X +center+ 20*flipOffset;
}
Debug.WriteLine("fff: " + center);
theSpriteBatch.Draw(BackHand, BackHandPosition, BackHandSource, Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
theSpriteBatch.Draw(Body, BodyPosition, BodySource, Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
theSpriteBatch.Draw(Head, HeadPosition, HeadSource, Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
theSpriteBatch.Draw(TopHand, TopHandPosition, TopHandSource, Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
}
hmm.. that is a tricky one.
There's probably a few different ways to do this and I think drawing they player to a render target is a reasonable approach if you can get it to work.
However, if you want a quick and dirty approach you might be able to simply flip the offsets of each part like this:
// Quick-and-dirty body-part flipping: when the sprite is drawn with
// SpriteEffects.FlipHorizontally, mirror each part's X offset around the
// head position by negating it.
// Fix: only the X offsets change sign — a horizontal flip must not move the
// parts vertically, so the Y offsets stay constant (the original snippet
// multiplied the Y offsets by flipOffset too, which would place the body
// and hands above the head whenever the character faced the other way).
var flipOffset = FlipIs == SpriteEffects.FlipHorizontally ? -1 : 1;
if (Frame == 0)
{
HeadPosition.X = Position.X;
HeadPosition.Y = Position.Y;
BodyPosition.X = HeadPosition.X + 8 * flipOffset;
BodyPosition.Y = HeadPosition.Y + 32;
TopHandPosition.X = HeadPosition.X + 2 * flipOffset;
TopHandPosition.Y = HeadPosition.Y + 36;
BackHandPosition.X = HeadPosition.X + 20 * flipOffset;
BackHandPosition.Y = HeadPosition.Y + 36;
// Source rectangles within each part's texture.
HeadSource = new Rectangle(0, 0, this.Head.Width, this.Head.Height);
BodySource = new Rectangle(0, 0, 24, 54);
TopHandSource = new Rectangle(0, 0, 10, 27);
BackHandSource = new Rectangle(0, 0, 15, 27);
// Draw back-to-front so part layering is preserved in both orientations.
theSpriteBatch.Draw(BackHand, BackHandPosition, BackHandSource, Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
theSpriteBatch.Draw(Body, BodyPosition, BodySource, Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
theSpriteBatch.Draw(Head, HeadPosition, HeadSource, Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
theSpriteBatch.Draw(TopHand, TopHandPosition, TopHandSource, Color.White, 0.0f, Vector2.Zero, Scale, FlipIs, 0);
}
This is assuming that Position is right in the center of the sprite, if not you might need to do some tweaking.
Look, I'll be honest with you. There are better ways to approach this problem, but they'd require a significant redesign of the code and it's difficult to explain in one answer.
The first thing I would do to refactor the design is create a class to represent a single Sprite part to hold the common data points (offset, source rectangle, texture, color, etc) and setup data in one place. The animation code needs to be decoupled from the drawing code, etc.

Create input for cvContourArea in JavaCV

How do I create the contour to pass to cvContourArea? I have four 2D points, but I don't know how to create the CvSeq.
int[] myPolygon = new int[] { point1.x, point1.y, point2.x, point2.y, ... };
cvCountourArea(???, null, 0);
I found the solution:
// Flat x,y pairs describing the polygon's vertices.
int[] myPolygon = new int[] { point1.x, point1.y, point2.x, point2.y, ... };
// Wrap the flat array in a 1xN two-channel (CV_32SC2) matrix so it can be
// passed directly as a contour — no CvSeq construction needed.
Mat rectMat = cvCreateMat(1, myPolygon.length/2, CV_32SC2);
rectMat.getIntBuffer().put(myPolygon);
// Fix: the function is cvContourArea (the original had a typo,
// "cvCountourArea", which does not exist in JavaCV/OpenCV).
// CV_WHOLE_SEQ = use the entire contour; oriented = 0 returns |area|.
cvContourArea(rectMat, CV_WHOLE_SEQ, 0);
// Release the temporary matrix.
cvReleaseMat(rectMat);

XNA spritebatch and primative using different axis with same effect

I am developing a side scroller game using monogame 3.0 (XNA-C#). I am using a TriangleStrip to draw the ground and a spritebatch to draw a fence. I want the fence to be on top of the ground but it seems to be using a different axis (negative instead of positive). In order to align things up I need to flip the fence and use a negative x and y. Both the sprite and the primitive are using the same view matrix and project.
Why does the spritebatch need a negative axis while the primitive uses a positive one, when they are both using the same matrices?
/// <summary>
/// Sets up the camera matrices and the two BasicEffects used for the ground
/// primitive and the fence sprite batch.
/// </summary>
protected override void Initialize()
{
// Camera at z=1 looking at the origin, +Y up.
viewMatrix = Matrix.CreateLookAt(new Vector3(0, 0, 1), Vector3.Zero, new Vector3(0,1, 0));
// Bottom-left-origin orthographic projection (y grows upward), scaled to
// half size. NOTE(review): SpriteBatch's default convention is a top-left
// origin with y growing downward, which is presumably why the sprite
// appears to use a "negative" axis relative to the primitive.
projectionMatrix = Matrix.CreateOrthographicOffCenter(0,GraphicsDevice.Viewport.Width,
0, GraphicsDevice.Viewport.Height, 0f, 200f) *Matrix.CreateScale(new Vector3(.5f, .5f, 1f));
// Disable culling so the triangle strip's winding order cannot hide it.
GraphicsDevice.RasterizerState.CullMode = CullMode.None;
// Effect for the ground: per-vertex color, shared view/projection.
_groundBasicEffect = new BasicEffect(GraphicsDevice);
_groundBasicEffect.View = viewMatrix;
_groundBasicEffect.Projection = projectionMatrix;
_groundBasicEffect.VertexColorEnabled = true;
_groundBasicEffect.World = world;
// Effect for the fence sprite: textured, same projection, and a view
// matrix constructed identically to viewMatrix above.
_fenceBasicEffect = new BasicEffect(GraphicsDevice);
_fenceBasicEffect.View = Matrix.CreateLookAt(new Vector3(0, 0, 1), Vector3.Zero, new Vector3(0,1, 0));
_fenceBasicEffect.Projection = projectionMatrix;
_fenceBasicEffect.TextureEnabled = true;
_fenceBasicEffect.World = world;
base.Initialize();
}
/// <summary>
/// Creates the sprite batch, loads the fence texture, and builds the ground
/// triangle-strip vertex data and its vertex buffer.
/// </summary>
protected override void LoadContent()
{
_spriteBatch = new SpriteBatch(GraphicsDevice);
// Fix: C#'s verbatim-string prefix is '@', not '#'; #"Fence1" does not
// compile.
fenceTexture = this.Content.Load<Texture2D>(@"Fence1");
// Ground profile as a triangle strip: alternating (bottom, top) vertex
// pairs moving left to right; the top edge rises from 400 to 430 at x=300.
vertexData = new VertexPositionColor[8];
vertexData[0] = new VertexPositionColor(new Vector3(0, 0, 0), Color.Black);
vertexData[1] = new VertexPositionColor(new Vector3(0, 400, 0), Color.Black);
vertexData[2] = new VertexPositionColor(new Vector3(200, 0, 0), Color.Black);
vertexData[3] = new VertexPositionColor(new Vector3(200, 400, 0), Color.Black);
vertexData[4] = new VertexPositionColor(new Vector3(300, 0, 0), Color.Black);
vertexData[5] = new VertexPositionColor(new Vector3(300, 430, 0), Color.Black);
vertexData[6] = new VertexPositionColor(new Vector3(1200, 0, 0), Color.Black);
vertexData[7] = new VertexPositionColor(new Vector3(1200, 430, 0), Color.Black);
vertexBuffer = new VertexBuffer(GraphicsDevice, typeof(VertexPositionColor), vertexData.Length, BufferUsage.None);
vertexBuffer.SetData(vertexData);
}
/// <summary>
/// Draws the ground triangle strip with the ground effect, then the fence
/// texture through a SpriteBatch driven by the fence effect.
/// </summary>
protected override void Draw(GameTime gameTime)
{
GraphicsDevice.Clear(Color.White);
GraphicsDevice.SetVertexBuffer(vertexBuffer);
_groundBasicEffect.CurrentTechnique.Passes[0].Apply();
// Triangle strip: primitive count is vertex count minus 2.
GraphicsDevice.DrawUserPrimitives<VertexPositionColor>(PrimitiveType.TriangleStrip, vertexData, 0, vertexData.Length -2);
// First argument 0 == SpriteSortMode.Deferred; the custom effect supplies
// the view/projection matrices for the sprite.
_spriteBatch.Begin(0, null, null, null, null, _fenceBasicEffect);
_spriteBatch.Draw(fenceTexture, Vector2.Zero, new Rectangle(0, 0, 1130, 221), Color.White, 0, new Vector2(0, 200), 1, SpriteEffects.None, 0);
_spriteBatch.End();
base.Draw(gameTime);
}
How it looks...
How I expect it to look...
In order to get this look I need to change the Draw of the fence to this...
_spriteBatch.Draw(fenceTexture, Vector2.Zero, new Rectangle(0, 0, 1130, 221), Color.White, 0, new Vector2(0, -400), 1, SpriteEffects.FlipVertically, 0);

Resources