I'm learning DirectX programming. Yesterday I succeeded in making a cube.
Now I'm trying to make a spinning cube. In fact, I succeeded, but I'm not sure I'm doing it the right way.
Is the code below the commonly used approach? Every frame I call a function that transforms the vertex positions and creates a new vertex buffer. Is there another way that doesn't create a vertex buffer every frame? Is there a more effective way to improve performance?
void BoxApp::makeVertex()
{
Vertex vertices[] =
{
{ XMFLOAT3(-1.0f, -1.0f, -1.0f), (const XMFLOAT4)Colors::White },
{ XMFLOAT3(-1.0f, +1.0f, -1.0f), (const XMFLOAT4)Colors::Black },
{ XMFLOAT3(+1.0f, +1.0f, -1.0f), (const XMFLOAT4)Colors::Red },
{ XMFLOAT3(+1.0f, -1.0f, -1.0f), (const XMFLOAT4)Colors::Green },
{ XMFLOAT3(-1.0f, -1.0f, +1.0f), (const XMFLOAT4)Colors::Blue },
{ XMFLOAT3(-1.0f, +1.0f, +1.0f), (const XMFLOAT4)Colors::Yellow },
{ XMFLOAT3(+1.0f, +1.0f, +1.0f), (const XMFLOAT4)Colors::Cyan },
{ XMFLOAT3(+1.0f, -1.0f, +1.0f), (const XMFLOAT4)Colors::Magenta }
};
rotating_angle += 0.01f;
for (int i = 0; i < 8; i++) {
XMStoreFloat3(&vertices[i].pos, XMVector3Transform(XMLoadFloat3(&vertices[i].pos),
XMMatrixTranslation(0.0f, 1.0f, 1.0f) *
XMMatrixRotationX(rotating_angle) *
XMMatrixTranslation(0.0f, -1.0f, -1.0f)
));
}
D3D11_BUFFER_DESC vbd;
// ... set up the D3D11_BUFFER_DESC
D3D11_SUBRESOURCE_DATA vinitData;
// ... set up the D3D11_SUBRESOURCE_DATA
md3dDevice->CreateBuffer(&vbd, &vinitData, &mBoxVB);
}
Set the vertex positions once in local coordinates, then each frame build a transformation matrix from local to world space and send it to the vertex shader through a constant buffer.
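A minimal sketch of that approach in D3D11 follows. The vertex buffer is created once and never touched again; only the matrix is re-uploaded each frame. The names ObjectConstants, mObjectCB, mImmediateContext, view, and proj are assumptions for illustration, not part of the original code.

// Per-object constant buffer layout; must match the cbuffer in the vertex shader.
struct ObjectConstants
{
    XMFLOAT4X4 worldViewProj;
};

// Called once per frame: recompute the matrix and upload it.
void BoxApp::updateSpin()
{
    rotating_angle += 0.01f;

    // Same pivoted rotation as in makeVertex(), expressed as a world matrix.
    XMMATRIX world = XMMatrixTranslation(0.0f, 1.0f, 1.0f) *
                     XMMatrixRotationX(rotating_angle) *
                     XMMatrixTranslation(0.0f, -1.0f, -1.0f);

    ObjectConstants cb;
    // HLSL consumes matrices column-major by default, hence the transpose.
    XMStoreFloat4x4(&cb.worldViewProj, XMMatrixTranspose(world * view * proj));

    // mObjectCB was created once with D3D11_BIND_CONSTANT_BUFFER and D3D11_USAGE_DEFAULT.
    mImmediateContext->UpdateSubresource(mObjectCB, 0, nullptr, &cb, 0, 0);
    mImmediateContext->VSSetConstantBuffers(0, 1, &mObjectCB);
}

The vertex buffer itself can then be created once with D3D11_USAGE_IMMUTABLE and never rebuilt, so the per-frame cost drops to a single small constant-buffer update.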
I am currently implementing diffuse irradiance (a part of image-based lighting in PBR) in my game engine. I got to the point where I have to take an HDR image and turn it into a cubemap. I am currently using an EquirectangularToCubemap shader and it's working fine: I was able to project the HDR image onto a (unit) cube. Now comes the part where I am stuck: I can't turn this cube into a cubemap. I tried using one TextureCube, six RenderTargetViews, and a ShaderResourceView. My plan was to render the (unit) cube six times from different view projections with a FOV of 90° to capture a whole side in each render target, and lastly copy each render target's output to the corresponding side of the texture cube.
I don't know how to do this.
NOTE: I am using DirectX 11 as the rendering backend.
Here is the pseudo-code of my problem (which is not working):
glm::mat4 captureProjection = glm::perspective(glm::radians(90.0f), 1.0f, 0.1f, 10.0f);
glm::mat4 captureViews[] =
{
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(1.0f, 0.0f, 0.0f), glm::vec3(0.0f, -1.0f, 0.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(-1.0f, 0.0f, 0.0f), glm::vec3(0.0f, -1.0f, 0.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, -1.0f, 0.0f), glm::vec3(0.0f, 0.0f, -1.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f), glm::vec3(0.0f, -1.0f, 0.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, -1.0f), glm::vec3(0.0f, -1.0f, 0.0f))
};
//Create the TextureCube
D3D11_TEXTURE2D_DESC textureDesc = {};
textureDesc.Width = 512;
textureDesc.Height = 512;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 6;
textureDesc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT;
textureDesc.CPUAccessFlags = 0;
textureDesc.SampleDesc.Count = 1;
textureDesc.SampleDesc.Quality = 0;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
textureDesc.MiscFlags = D3D11_RESOURCE_MISC_TEXTURECUBE;
ID3D11Texture2D* tex = nullptr;
DX_CALL(DX11Internal::GetDevice()->CreateTexture2D(&textureDesc, nullptr, &tex));
// Create the Shader Resource view for the texture cube
D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc = {};
srvDesc.Format = textureDesc.Format;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE;
srvDesc.Texture2D.MostDetailedMip = 0;
srvDesc.Texture2D.MipLevels = 1;
DX_CALL(DX11Internal::GetDevice()->CreateShaderResourceView(tex, &srvDesc, &mSRV));
//Create the Render target views
Vector<ID3D11RenderTargetView*> rtvs;
for (Uint i = 0; i < 6; i++)
{
D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc = {};
renderTargetViewDesc.Format = textureDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
renderTargetViewDesc.Texture2D.MipSlice = 0;
ID3D11RenderTargetView* view = nullptr;
DX11Internal::GetDevice()->CreateRenderTargetView(tex, &renderTargetViewDesc, &view);
rtvs.push_back(view);
}
tex->Release();
auto deviceContext = DX11Internal::GetDeviceContext();
for (Uint i = 0; i < 6; ++i)
{
float clearColor[4] = { 1.0f, 0.1f, 0.1f, 1.0f };
deviceContext->ClearRenderTargetView(rtvs[i], clearColor);
Vault::Get<Shader>("EquirectangularToCubemap.hlsl")->Bind();
auto data = captureProjection * captureViews[i];
cbuffer->Bind();
cbuffer->SetData(&data);
texture->Bind(0);
tempPipeline->Bind();
deviceContext->OMSetRenderTargets(1, &rtvs[i], nullptr);
// I am rendering the cube here from different view projections to capture the faces, but I don't know where to copy the data to the side of the TextureCube :( [Note that I am doing this only once]
RenderCommand::DrawIndexed(tempPipeline, 36);
}
Thanks in advance!
You are not specifying which slice you want to write to when creating your render target views.
The correct description for a single slice is:
D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc = {};
renderTargetViewDesc.Format = textureDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DARRAY;
renderTargetViewDesc.Texture2DArray.MipSlice = 0;
renderTargetViewDesc.Texture2DArray.FirstArraySlice = i;
renderTargetViewDesc.Texture2DArray.ArraySize = 1;
(Note: if you use MSAA, ViewDimension becomes D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY.)
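Put together, a sketch of the corrected creation loop from the question, with the same variable names:

// Create one render target view per cube face (array slice).
Vector<ID3D11RenderTargetView*> rtvs;
for (Uint i = 0; i < 6; i++)
{
    D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc = {};
    renderTargetViewDesc.Format = textureDesc.Format;
    renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DARRAY;
    renderTargetViewDesc.Texture2DArray.MipSlice = 0;
    renderTargetViewDesc.Texture2DArray.FirstArraySlice = i; // select face i
    renderTargetViewDesc.Texture2DArray.ArraySize = 1;       // view exactly one slice

    ID3D11RenderTargetView* view = nullptr;
    DX_CALL(DX11Internal::GetDevice()->CreateRenderTargetView(tex, &renderTargetViewDesc, &view));
    rtvs.push_back(view);
}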
Also, to create the shader resource view for your resource, since you want a default one, you can pass NULL instead of a hand-made D3D11_SHADER_RESOURCE_VIEW_DESC:
DX_CALL(DX11Internal::GetDevice()->CreateShaderResourceView(tex, NULL, &mSRV));
You can also create another render target view that maps the whole cube map in one go:
DX11Internal::GetDevice()->CreateRenderTargetView(tex, NULL, &cubeMapFullView);
But in that case you will need a geometry shader to replicate the geometry across slices (using SV_RenderTargetArrayIndex), or vendor-specific extensions that provide the same kind of feature.
There are two textures:
Destination texture: 720×1080
Source texture: 300×300
Help required:
We need to put the source texture onto the destination texture at a given location (x center, y center, height, and width) and fill the rest of the destination texture with zeros.
The challenge we are facing is that the source texture is getting enlarged to the size of the destination texture.
I tried to do this in the shader code as follows.
// Vertex positions and texture coordinates
static const GLfloat square_vertices[] = {
-1.0f, -1.0f, // bottom left
1.0f, -1.0f, // bottom right
-1.0f, 1.0f, // top left
1.0f, 1.0f, // top right
};
static const GLfloat texture_vertices[] = {
0.0f, 0.0f, // bottom left
1.0f, 0.0f, // bottom right
0.0f, 1.0f, // top left
1.0f, 1.0f, // top right
};
// program
glUseProgram(upsample_program_);
// vertex storage
GLuint vbo[2];
glGenBuffers(2, vbo);
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
// vbo 0
glBindBuffer(GL_ARRAY_BUFFER, vbo[0]);
glBufferData(GL_ARRAY_BUFFER, 4 * 2 * sizeof(GLfloat), square_vertices,
GL_STATIC_DRAW);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, nullptr);
// vbo 1
glBindBuffer(GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, 4 * 2 * sizeof(GLfloat), texture_vertices,
GL_STATIC_DRAW);
glEnableVertexAttribArray(ATTRIB_TEXTURE_POSITION);
glVertexAttribPointer(ATTRIB_TEXTURE_POSITION, 2, GL_FLOAT, 0, 0, nullptr);
// draw
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
// cleanup
glDisableVertexAttribArray(ATTRIB_VERTEX);
glDisableVertexAttribArray(ATTRIB_TEXTURE_POSITION);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glDeleteVertexArrays(1, &vao);
glDeleteBuffers(2, vbo);
// Frame drawing
output_width=720;
output_height=1080;
// Upsample small mask into output.
GlTexture output_texture = CreateDestinationTexture(
output_width, output_height,GpuBufferFormat::kBGRA32);
{
gpu_helper_.BindFramebuffer(output_texture); // GL_TEXTURE0
glActiveTexture(GL_TEXTURE1);
// small_mask_texture is the texture that needs to be rendered inside the rect.
glBindTexture(GL_TEXTURE_2D, small_mask_texture.id());
GlRender();
glBindTexture(GL_TEXTURE_2D, 0);
glUniform2f(glGetUniformLocation(upsample_program_, "rcenters"),normx_center,normy_center);
glUniform2f(glGetUniformLocation(upsample_program_, "rhw"),normalized_width,normalized_height);
glFlush();
}
Here is my shader code:
#if __VERSION__ < 130
#define in varying
#endif // __VERSION__ < 130
#ifdef GL_ES
#define fragColor gl_FragColor
precision highp float;
#else
#define lowp
#define mediump
#define highp
#define texture2D texture
out vec4 fragColor;
#endif // defined(GL_ES)
in vec2 sample_coordinate;
uniform sampler2D input_data;
uniform vec2 rcenters;
uniform vec2 rhw;
void main() {
vec4 pix = texture2D(input_data, sample_coordinate);
float xcenter = rcenters.s;
float ycenter = rcenters.t;
float rwidth = rhw.s;
float rheight = rhw.t;
if(rwidth != 0.0 && rheight != 0.0){
float cox = sample_coordinate.s;
float coy = sample_coordinate.t;
float xmin = xcenter-(rwidth/2.0);
float ymin = ycenter-(rheight/2.0);
float xmax = xcenter+(rwidth/2.0);
float ymax = ycenter+(rheight/2.0);
if((xmin<cox && xmax>cox) && (ymin<coy && ymax>coy)){
fragColor = pix;
}
else{
fragColor = vec4(0.0,0.0,0.0,1.0);
}
}
else{
fragColor = vec4(0.0,0.0,0.0,1.0);
}
}
With this solution I am getting the source texture enlarged to the size of the destination texture. I understand that I need to do something with the texture coordinates in the vertex stage, but I don't know what.
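One way to attack this, sketched under the assumption that the quad's clip-space positions control where the source lands in the destination: shrink and offset the quad so it covers only the target rectangle, clear the framebuffer to black first, and leave the texture coordinates at 0..1. The helper below is hypothetical, not part of the original code.

#include <algorithm>

// Hypothetical helper: build quad positions in clip space (NDC) that cover
// only the target rectangle of the destination. Inputs are the same
// normalized [0,1] values the question already passes as uniforms.
void MakePlacedQuad(float norm_center_x, float norm_center_y,
                    float norm_width, float norm_height,
                    float out_vertices[8])
{
    // Map the rect center from [0,1] to clip space [-1,1].
    const float cx = norm_center_x * 2.0f - 1.0f;
    const float cy = norm_center_y * 2.0f - 1.0f;
    // A width of w in [0,1] spans 2*w in clip space, so the half-extent is w.
    const float hw = norm_width;
    const float hh = norm_height;

    const float v[8] = {
        cx - hw, cy - hh,  // bottom left
        cx + hw, cy - hh,  // bottom right
        cx - hw, cy + hh,  // top left
        cx + hw, cy + hh,  // top right
    };
    std::copy(v, v + 8, out_vertices);
}

Upload these positions instead of square_vertices; the fragment shader then reduces to a plain texture fetch, and both the rectangle test and the black fill become unnecessary (the clear provides the zeros).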
I needed to draw some simple shapes and I decided to go with D3D9. After going through a few of the tutorials on directxtutorial.com, I finally have all the code and knowledge I need to make my first shape appear on the screen. The problem is, though, that no image is appearing. I've looked over the code many times and have compared it with the code on the website, and it all checks out. Why is nothing rendering on the screen?
#define CUSTOMFVF (D3DFVF_XYZRHW | D3DFVF_DIFFUSE)
LPDIRECT3D9 d3d;
LPDIRECT3DDEVICE9 d3ddev;
LPDIRECT3DVERTEXBUFFER9 vbuffer;
struct CUSTOMVERTEX
{
float x, y, z, rhw;
DWORD color;
};
void InitD3D(HWND hWnd)
{
d3d = Direct3DCreate9(D3D_SDK_VERSION);
D3DPRESENT_PARAMETERS d3dpp;
ZeroMemory(&d3dpp, sizeof(d3dpp));
d3dpp.Windowed = true;
d3dpp.hDeviceWindow = hWnd;
d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3d->CreateDevice(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hWnd, D3DCREATE_SOFTWARE_VERTEXPROCESSING, &d3dpp, &d3ddev);
}
void InitGraphics()
{
CUSTOMVERTEX verticies[]
{
{50, 70, 1.0f, 1.0f, D3DCOLOR_XRGB(250, 0, 0),},
{70, 50, 1.0f, 1.0f, D3DCOLOR_XRGB(0, 250, 0),},
{40, 80, 1.0f, 1.0f, D3DCOLOR_XRGB(0, 0, 250),},
};
d3ddev->CreateVertexBuffer(3 * sizeof(CUSTOMVERTEX), NULL, CUSTOMFVF, D3DPOOL_MANAGED, &vbuffer, NULL);
void* vp;
vbuffer->Lock(0, 0, (void**)&vp, NULL);
memcpy(vp, verticies, sizeof(verticies));
vbuffer->Unlock();
}
void Draw()
{
d3ddev->Clear(NULL, NULL, D3DCLEAR_TARGET, NULL, 1.0f, NULL);
d3ddev->BeginScene();
d3ddev->SetFVF(CUSTOMFVF);
d3ddev->SetStreamSource(0, vbuffer, 0, sizeof(CUSTOMVERTEX));
d3ddev->DrawPrimitive(D3DPT_TRIANGLELIST, 0, 1);
d3ddev->EndScene();
d3ddev->Present(NULL, NULL, NULL, NULL);
}
void ReleaseD3D()
{
d3d->Release();
d3ddev->Release();
vbuffer->Release();
}
Turns out my vertex positions weren't triangle enough to make a triangle. Thanks for the help, though, it reminded me to set up error-checking.
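As a side note on the error-checking mentioned above, a minimal sketch of what that could look like; the HR_CHECK macro is my own illustration, not from the tutorial:

#include <cstdio>

// Hypothetical convenience macro: log and bail out when a D3D9 call fails
// (assumes a void function).
#define HR_CHECK(call)                                            \
    do {                                                          \
        HRESULT hr_ = (call);                                     \
        if (FAILED(hr_)) {                                        \
            std::fprintf(stderr, "%s failed: 0x%08lX\n",          \
                         #call, static_cast<unsigned long>(hr_)); \
            return;                                               \
        }                                                         \
    } while (0)

void InitGraphicsChecked(LPDIRECT3DDEVICE9 d3ddev, LPDIRECT3DVERTEXBUFFER9* vbuffer)
{
    HR_CHECK(d3ddev->CreateVertexBuffer(3 * sizeof(CUSTOMVERTEX), 0, CUSTOMFVF,
                                        D3DPOOL_MANAGED, vbuffer, NULL));
}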
I am trying to port an example of depth peeling, an order-independent transparency technique, to so-called modern OpenGL (3.3+), but since I am a beginner, it is not that easy.
Here you can find a working version (the GL2 one) and the one in progress (the GL3 one):
https://github.com/elect86/modern-jogl-examples/tree/master/modern-jogl-examples/src/depthPeeling
I can't see any layer behind the front one.
I guess there are some problems with the alpha value.
I tried to debug it; here is the core part:
private void renderDepthPeeling(GL3 gl3) {
/**
* (1) Initialize min depth buffer.
*/
gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, colorBlenderFboId[0]);
gl3.glDrawBuffer(GL3.GL_COLOR_ATTACHMENT0);
gl3.glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
gl3.glClear(GL3.GL_COLOR_BUFFER_BIT | GL3.GL_DEPTH_BUFFER_BIT);
gl3.glEnable(GL3.GL_DEPTH_TEST);
dpInit.bind(gl3);
{
gl3.glUniform1f(dpInit.getAlphaUnLoc(), opacity);
drawModel(gl3);
}
dpInit.unbind(gl3);
/**
* (2) Depth peeling + blending.
*/
int layersNumber = (passesNumber - 1) * 2;
// System.out.println("layersNumber: " + layersNumber);
for (int layer = 1; layer < 2; layer++) {
int currentId = layer % 2;
int previousId = 1 - currentId;
// gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, fboId[currentId]);
gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, 0);
gl3.glDrawBuffer(GL3.GL_COLOR_ATTACHMENT0);
gl3.glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
gl3.glClear(GL3.GL_COLOR_BUFFER_BIT | GL3.GL_DEPTH_BUFFER_BIT);
gl3.glDisable(GL3.GL_BLEND);
gl3.glEnable(GL3.GL_DEPTH_TEST);
{
dpPeel.bind(gl3);
{
gl3.glActiveTexture(GL3.GL_TEXTURE0);
gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, depthTextureId[previousId]);
gl3.glUniform1i(dpPeel.getDepthTexUnLoc(), 0);
{
gl3.glUniform1f(dpPeel.getAlphaUnLoc(), opacity);
drawModel(gl3);
}
gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, 0);
}
dpPeel.unbind(gl3);
gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, colorBlenderFboId[0]);
gl3.glDrawBuffer(GL3.GL_COLOR_ATTACHMENT0);
}
gl3.glDisable(GL3.GL_DEPTH_TEST);
gl3.glEnable(GL3.GL_BLEND);
{
gl3.glBlendEquation(GL3.GL_FUNC_ADD);
gl3.glBlendFuncSeparate(GL3.GL_DST_ALPHA, GL3.GL_ONE, GL3.GL_ZERO, GL3.GL_ONE_MINUS_SRC_ALPHA);
dpBlend.bind(gl3);
dpBlend.bindTextureRECT(gl3, "TempTex", colorTextureId[currentId], 0);
{
// gl3.glCallList(quadDisplayList);
drawFullScreenQuad(gl3);
}
dpBlend.unbind(gl3);
}
gl3.glDisable(GL3.GL_BLEND);
}
/**
* (3) Final pass.
*/
// gl3.glBindFramebuffer(GL3.GL_FRAMEBUFFER, 0);
// gl3.glDrawBuffer(GL3.GL_BACK);
// gl3.glDisable(GL3.GL_DEPTH_TEST);
//
// dpFinal.bind(gl3);
// {
// gl3.glUniform3f(dpFinal.getBackgroundColorUnLoc(), 1.0f, 1.0f, 1.0f);
//
//// dpFinal.bindTextureRECT(gl3, "ColorTex", colorBlenderTextureId[0], 0);
// gl3.glActiveTexture(GL3.GL_TEXTURE0);
// gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, colorBlenderTextureId[0]);
// gl3.glUniform1i(dpFinal.getColorTexUnLoc(), 0);
// {
//// gl3.glCallList(quadDisplayList);
// drawFullScreenQuad(gl3);
// }
// gl3.glBindTexture(GL3.GL_TEXTURE_RECTANGLE, 0);
// }
// dpFinal.unbind(gl3);
}
Comparing the GL2 and GL3 versions of the program, the first and the last passes (1 and 3) look correct, so the problem lies in pass 2.
I modified the for loop so that it runs only one iteration
for (int layer = 1; layer < 2; layer++) {
and
// gl2.glBindFramebuffer(GL2.GL_FRAMEBUFFER, fboId[currentId]);
gl2.glBindFramebuffer(GL2.GL_FRAMEBUFFER, 0);
in order to see the intermediate result visually.
Well, in GL2 I get the expected intermediate image, while in GL3 I do not (screenshots omitted).
My dpPeel program is based on dpPeel_VS:
#version 330
layout (location = 0) in vec4 position;
layout(std140) uniform mvpMatrixes {
mat4 projectionMatrix;
mat4 cameraMatrix;
};
void main(void)
{
gl_Position = projectionMatrix * cameraMatrix * position;
}
And dpPeel_FS plus shade_FS:
#version 330
uniform sampler2DRect DepthTex;
vec4 ShadeFragment();
out vec4 outputColor;
void main(void)
{
// Bit-exact comparison between FP32 z-buffer and fragment depth
float frontDepth = texture(DepthTex, gl_FragCoord.xy).r;
if (gl_FragCoord.z <= frontDepth) {
discard;
}
// Shade all the fragments behind the z-buffer
vec4 color = ShadeFragment();
outputColor = vec4(color.rgb * color.a, color.a);
}
#version 330
uniform float Alpha;
vec4 ShadeFragment()
{
vec4 color;
color.rgb = vec3(.4,.85,.0);
color.a = Alpha;
return color;
}
Do you see the error(s)?
OK, solved: it was a problem in the blending pass and with the fullscreen quad.
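For reference, a minimal sketch of a GL3-style fullscreen quad, which is one plausible shape for drawFullScreenQuad; this is an illustration under that assumption, not the poster's actual fix:

// One-time setup: a VAO holding four clip-space corners.
GLuint quadVao = 0, quadVbo = 0;

void createFullScreenQuad()
{
    const float corners[] = {
        -1.0f, -1.0f,
         1.0f, -1.0f,
        -1.0f,  1.0f,
         1.0f,  1.0f,
    };
    glGenVertexArrays(1, &quadVao);
    glBindVertexArray(quadVao);
    glGenBuffers(1, &quadVbo);
    glBindBuffer(GL_ARRAY_BUFFER, quadVbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(corners), corners, GL_STATIC_DRAW);
    glEnableVertexAttribArray(0);               // location 0 = position
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, nullptr);
    glBindVertexArray(0);
}

// Per blend pass: the vertex shader passes the position straight through.
void drawFullScreenQuad()
{
    glBindVertexArray(quadVao);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    glBindVertexArray(0);
}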
If I would like to do lighting now, where should I apply it?
I am trying to build a simple 2D game using 2D sprites with DirectX 9, and I'm having problems getting the images to come out cleanly. I'd like to load bmp images and display them on the screen as is (no interpolation, no magnification, no filtering or anti-aliasing, etc).
I'm sure I'm missing something, but when I try and render a 100x100 bmp to the screen, it looks choppy and distorted, like a pixel art image would normally look when shrunken slightly. I want the bmp to look exactly as it does when loaded in MS Paint.
Does anyone have any idea why this might be the case? My code is shown below:
Initialization code:
g_DxCom = Direct3DCreate9( D3D_SDK_VERSION );
if ( g_DxCom == NULL )
{
return false;
}
D3DDISPLAYMODE d3dDisplayMode;
if ( FAILED( g_DxCom->GetAdapterDisplayMode( D3DADAPTER_DEFAULT, &d3dDisplayMode ) ) )
{
return false;
}
D3DPRESENT_PARAMETERS d3dPresentParameters;
::ZeroMemory( &d3dPresentParameters, sizeof(D3DPRESENT_PARAMETERS) );
d3dPresentParameters.Windowed = FALSE;
d3dPresentParameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dPresentParameters.BackBufferFormat = d3dDisplayMode.Format; // D3DFMT_X8R8G8B8
d3dPresentParameters.BackBufferWidth = d3dDisplayMode.Width;
d3dPresentParameters.BackBufferHeight = d3dDisplayMode.Height;
d3dPresentParameters.PresentationInterval = D3DPRESENT_INTERVAL_ONE;
if ( FAILED( g_DxCom->CreateDevice( D3DADAPTER_DEFAULT,
D3DDEVTYPE_HAL,
this->hWnd,
D3DCREATE_HARDWARE_VERTEXPROCESSING,
&d3dPresentParameters,
&pd3dDevice ) ) )
{
if ( FAILED( g_DxCom->CreateDevice( D3DADAPTER_DEFAULT,
D3DDEVTYPE_HAL,
this->hWnd,
D3DCREATE_SOFTWARE_VERTEXPROCESSING,
&d3dPresentParameters,
&pd3dDevice ) ) )
{
return false;
}
}
texture = NULL;
bg_texture = NULL;
Render code:
LPDIRECT3DDEVICE9 g_dxDevice;
float float1 = 99.5f; // I'd like to render my 100x100 sprite from screen coordinates 100, 100 to 200, 200
float float2 = 198.5f;
CUSTOMVERTEX OurVertices[] =
{
{ float1, float2, 1.0f, 1.0f, 0.0f, 1.0f },
{ float1, float1, 1.0f, 1.0f, 0.0f, 0.0f },
{ float2, float1, 1.0f, 1.0f, 1.0f, 0.0f },
{ float1, float2, 1.0f, 1.0f, 0.0f, 1.0f },
{ float2, float1, 1.0f, 1.0f, 1.0f, 0.0f },
{ float2, float2, 1.0f, 1.0f, 1.0f, 1.0f }
};
LPDIRECT3DVERTEXBUFFER9 v_buffer;
g_dxDevice->CreateVertexBuffer( 6 * sizeof(CUSTOMVERTEX),
0,
CUSTOMFVF,
D3DPOOL_MANAGED,
&v_buffer,
NULL );
VOID* pVoid;
// Lock the vertex buffer into memory
v_buffer->Lock( 0, 0, &pVoid, 0 );
// Copy our vertex buffer to memory
::memcpy( pVoid, OurVertices, sizeof(OurVertices) );
// Unlock buffer
v_buffer->Unlock();
LPDIRECT3DTEXTURE9 g_texture;
HRESULT hError;
DWORD dwTextureFilter = D3DTEXF_NONE;
g_dxDevice->SetSamplerState( 0, D3DSAMP_MINFILTER, dwTextureFilter );
g_dxDevice->SetSamplerState( 0, D3DSAMP_MAGFILTER, dwTextureFilter );
g_dxDevice->SetSamplerState( 0, D3DSAMP_MIPFILTER, dwTextureFilter );
g_dxDevice->SetTextureStageState(0,D3DTSS_COLOROP,D3DTOP_SELECTARG1);
g_dxDevice->SetTextureStageState(0,D3DTSS_COLORARG1,D3DTA_TEXTURE);
g_dxDevice->SetTextureStageState(0,D3DTSS_COLORARG2,D3DTA_DIFFUSE);
hError = D3DXCreateTextureFromFile( g_dxDevice, L"Test.bmp", &g_texture ); // 100x100 sprite
g_dxDevice->SetTexture( 0, g_texture );
g_dxDevice->Clear( 0,
NULL,
D3DCLEAR_TARGET,
D3DCOLOR_XRGB( 0, 40, 100 ),
1.0f,
0 );
g_dxDevice->BeginScene();
// Do rendering on the back buffer here
g_dxDevice->SetFVF( CUSTOMFVF );
g_dxDevice->SetStreamSource( 0, v_buffer, 0, sizeof(CUSTOMVERTEX) );
g_dxDevice->DrawPrimitive( D3DPT_TRIANGLELIST, 0, 6 );
g_dxDevice->EndScene();
g_dxDevice->Present( NULL, NULL, NULL, NULL );
g_texture->Release();
v_buffer->Release();
Okay, so I've finally figured it out, and I should have known this was the case.
It looks like DirectX 9 only works cleanly with textures whose dimensions are powers of two. If I change the texture so that the sprite square is 128 × 128 (just adding some transparency) and run the application with float2 changed appropriately, there is no distortion in the rendered image.
Hurrah...
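As an illustration of the workaround (my own sketch, not from the original post): round the sprite dimensions up to the next power of two when creating the texture, then sample only the used sub-region.

#include <cstdint>

// Round a dimension up to the next power of two (e.g. 100 -> 128).
// Assumes v > 0.
static uint32_t NextPowerOfTwo(uint32_t v)
{
    v--;
    v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
    v |= v >> 8;  v |= v >> 16;
    return v + 1;
}

// For a 100x100 sprite padded into a 128x128 texture, sample only the
// used region: the quad's UVs become (0,0) .. (100.0f/128.0f, 100.0f/128.0f).

Alternatively, D3DXCreateTextureFromFileEx accepts D3DX_DEFAULT_NONPOW2 for the width and height to keep the original dimensions, provided the device reports support for non-power-of-two textures.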