How to create two objects from one vertex array with translation? - directx

I have an object that I can render, but I want to use its vertices twice and I don't know how.
Edit: I want them to translate independently during the game.
This is my code that reads the object from a txt file:
fin.open("piyon.txt");
fin >> vertexCountpiyon;
verticespiyon = new SimpleVertex[vertexCountpiyon];
for(int i=0; i<vertexCountpiyon; i++)
{
fin >> verticespiyon[i].Pos.x >> verticespiyon[i].Pos.y >> verticespiyon[i].Pos.z;
fin >> verticespiyon[i].Tex.x >> verticespiyon[i].Tex.y;
fin >> verticespiyon[i].Normal.x >> verticespiyon[i].Normal.y >> verticespiyon[i].Normal.z;
}
fin.close();
bd.ByteWidth = sizeof( SimpleVertex ) * vertexCountpiyon;
ZeroMemory( &InitData, sizeof(InitData) );
InitData.pSysMem = verticespiyon;
hr = g_pd3dDevice->CreateBuffer( &bd, &InitData, &g_pVertexBuffer_piyon );
if( FAILED( hr ) ) return hr;
And here is my render code:
g_pImmediateContext->IASetVertexBuffers( 0, 1, &g_pVertexBuffer_piyon, &stride, &offset );
cBuffer.vMeshColor = XMFLOAT4( 1.0f, 1.0f, 1.0f, 1.0f );
XMMATRIX mTranslateBeyazPiyon = XMMatrixTranslation( -17.5F, 0, -12.5F );
cBuffer.mWorld = XMMatrixTranspose( mTranslateBeyazPiyon );
g_World_Piyon = mTranslateBeyazPiyon;
g_pImmediateContext->UpdateSubresource( g_pConstantBuffer, 0, NULL, &cBuffer, 0, 0 );
g_pImmediateContext->VSSetShader( g_pVertexShader, NULL, 0 );
g_pImmediateContext->VSSetConstantBuffers( 2, 1, &g_pConstantBuffer );
g_pImmediateContext->PSSetShader( g_pPixelShader, NULL, 0 );
g_pImmediateContext->PSSetConstantBuffers( 2, 1, &g_pConstantBuffer );
g_pImmediateContext->Draw( 7050, 0 );

If you want to just draw the same object in a different place, you only need to change your world matrix and then draw again. So, using parts of your code for reference, something like this:
// set world matrix for first object ...
XMMATRIX mTranslateBeyazPiyon = XMMatrixTranslation( -17.5F, 0, -12.5F );
cBuffer.mWorld = XMMatrixTranspose( mTranslateBeyazPiyon );
g_pImmediateContext->UpdateSubresource( g_pConstantBuffer, 0, NULL, &cBuffer, 0, 0 );
// ... set any other common state
// draw first object
g_pImmediateContext->Draw( 7050, 0 );
// set world matrix for second object ... for example, translated somewhere else
mTranslateBeyazPiyon = XMMatrixTranslation( -34.5F, 0, -24.5F );
cBuffer.mWorld = XMMatrixTranspose( mTranslateBeyazPiyon );
g_pImmediateContext->UpdateSubresource( g_pConstantBuffer, 0, NULL, &cBuffer, 0, 0 );
// draw second object
g_pImmediateContext->Draw( 7050, 0 );
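Since your edit says the two pieces must translate independently during the game, the usual pattern is to keep one world matrix per instance, update those matrices from your game logic each frame, and repeat the update-constant-buffer-then-Draw pair per instance. A minimal sketch against the code above; g_World_Piyon1, g_World_Piyon2, speed1, speed2 and t are hypothetical names:
// Game logic: move each instance independently (hypothetical variables)
g_World_Piyon1 = XMMatrixTranslation( -17.5f + speed1 * t, 0.0f, -12.5f );
g_World_Piyon2 = XMMatrixTranslation( -34.5f, 0.0f, -24.5f + speed2 * t );
// Render: one vertex buffer, two draws with different world matrices
g_pImmediateContext->IASetVertexBuffers( 0, 1, &g_pVertexBuffer_piyon, &stride, &offset );
cBuffer.mWorld = XMMatrixTranspose( g_World_Piyon1 );
g_pImmediateContext->UpdateSubresource( g_pConstantBuffer, 0, NULL, &cBuffer, 0, 0 );
g_pImmediateContext->Draw( 7050, 0 );
cBuffer.mWorld = XMMatrixTranspose( g_World_Piyon2 );
g_pImmediateContext->UpdateSubresource( g_pConstantBuffer, 0, NULL, &cBuffer, 0, 0 );
g_pImmediateContext->Draw( 7050, 0 );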

Related

Swap Histogram of two different images

I have two different images (Image A and Image B), whose histograms (histImage and histImage1) I have already computed.
Now I want the histogram of Image A to become the histogram of Image B, so that Image B gets colors similar to Image A.
The code is as follows:
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
int main( )
{
Mat src, dst, src1;
/// Load image
src = imread("ImageA", 1 ); // Image A
src1 = imread("ImageB", 1 ); // Image B
if( !src.data || !src1.data )
{ return -1; }
/// Separate the image into its 3 planes ( B, G and R )
vector<Mat> bgr_planes;
vector<Mat> bgr_planes1;
split( src, bgr_planes );
split( src1, bgr_planes1 );
/// Establish the number of bins
int histSize = 256;
/// Set the ranges ( for B,G,R )
float range[] = { 0, 256 } ;
const float* histRange = { range };
bool uniform = true; bool accumulate = false;
Mat b_hist, g_hist, r_hist; //ImageA
Mat b_hist1, g_hist1, r_hist1; //ImageB
/// Compute the histograms of Image A
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
/// Compute the histograms of Image B
calcHist( &bgr_planes1[0], 1, 0, Mat(), b_hist1, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes1[1], 1, 0, Mat(), g_hist1, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes1[2], 1, 0, Mat(), r_hist1, 1, &histSize, &histRange, uniform, accumulate );
// Draw the histograms for B, G and R
int hist_w = 512; int hist_h = 400; //Image A
int bin_w = cvRound( (double) hist_w/histSize ); //Image A
int hist_w1 = 512; int hist_h1 = 400; //Image B
int bin_w1 = cvRound( (double) hist_w1/histSize );//Image B
Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) ); //ImageA
Mat histImage1( hist_h1, hist_w1, CV_8UC3, Scalar( 0,0,0) ); //ImageB
/// Normalize the result to [ 0, histImage.rows ] ImageA
normalize(b_hist, b_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(g_hist, g_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(r_hist, r_hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
/// Normalize the result to [ 0, histImage.rows ] ImageB
normalize(b_hist1, b_hist1, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(g_hist1, g_hist1, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
normalize(r_hist1, r_hist1, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
/// Draw for each channel ImageA
for( int i = 1; i < histSize; i++ )
{
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ) ,
Point( bin_w*(i), hist_h - cvRound(b_hist.at<float>(i)) ),
Scalar( 255, 0, 0), 2, 8, 0 );
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(g_hist.at<float>(i-1)) ) ,
Point( bin_w*(i), hist_h - cvRound(g_hist.at<float>(i)) ),
Scalar( 0, 255, 0), 2, 8, 0 );
line( histImage, Point( bin_w*(i-1), hist_h - cvRound(r_hist.at<float>(i-1)) ) ,
Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ),
Scalar( 0, 0, 255), 2, 8, 0 );
}
////////////////////////////////////////////////////
/// Draw for each channel ImageB
for( int i = 1; i < histSize; i++ )
{
line( histImage1, Point( bin_w1*(i-1), hist_h1 - cvRound(b_hist1.at<float>(i-1)) ) ,
Point( bin_w1*(i), hist_h1 - cvRound(b_hist1.at<float>(i)) ),
Scalar( 255, 0, 0), 2, 8, 0 );
line( histImage1, Point( bin_w1*(i-1), hist_h1 - cvRound(g_hist1.at<float>(i-1)) ) ,
Point( bin_w1*(i), hist_h1 - cvRound(g_hist1.at<float>(i)) ),
Scalar( 0, 255, 0), 2, 8, 0 );
line( histImage1, Point( bin_w1*(i-1), hist_h1 - cvRound(r_hist1.at<float>(i-1)) ) ,
Point( bin_w1*(i), hist_h1 - cvRound(r_hist1.at<float>(i)) ),
Scalar( 0, 0, 255), 2, 8, 0 );
}
/////////////////////////////////////////////////////
/// Display (window names must match between namedWindow and imshow)
namedWindow("calcHist", CV_WINDOW_AUTOSIZE );
imshow("calcHist", histImage ); // Histogram of Image A
namedWindow("calcHist1", CV_WINDOW_AUTOSIZE );
imshow("calcHist1", histImage1 ); // Histogram of Image B
waitKey(0);
return 0;
}
One way to swap the histograms would be to follow the methodology used in histogram equalisation:
1. Compute the histograms (H1 and H2) respectively for the two images (I1 and I2) and normalise them (already done in your code).
2. Compute the cumulative histograms - also called cumulative distribution functions - C1 and C2 corresponding to H1 and H2, as explained here.
3. Substitute new values for every pixel in I1 using the cumulative histogram C2, as explained here.
4. Do the same for every pixel in I2, using the cumulative histogram C1.
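A minimal one-channel sketch of steps 2-4, assuming the raw (un-normalized) 256-bin histograms straight out of calcHist - the normalize calls in your code rescale the histograms for display only, so run the matching before them. matchChannel is a hypothetical helper:
Mat matchChannel( const Mat& srcChannel, const Mat& srcHist, const Mat& refHist )
{
    // Build the cumulative distribution functions C1 and C2
    float c1[256], c2[256];
    c1[0] = srcHist.at<float>(0);
    c2[0] = refHist.at<float>(0);
    for( int i = 1; i < 256; i++ )
    {
        c1[i] = c1[i-1] + srcHist.at<float>(i);
        c2[i] = c2[i-1] + refHist.at<float>(i);
    }
    for( int i = 0; i < 256; i++ ) { c1[i] /= c1[255]; c2[i] /= c2[255]; }
    // Map each source level to the reference level with the closest CDF value
    Mat lut( 1, 256, CV_8U );
    for( int i = 0; i < 256; i++ )
    {
        int j = 0;
        while( j < 255 && c2[j] < c1[i] ) j++;
        lut.at<uchar>(i) = (uchar)j;
    }
    Mat dstChannel;
    LUT( srcChannel, lut, dstChannel );
    return dstChannel;
}
For example, to give Image B the colors of Image A, match each of its planes against Image A's histograms and merge: bgr_planes1[0] = matchChannel( bgr_planes1[0], b_hist1, b_hist ); likewise for the G and R planes, then merge( bgr_planes1, dst );.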

2D Programming with Direct3D 9 - Test image is distorted

I am trying to build a simple 2D game using 2D sprites with DirectX 9, and I'm having problems getting the images to come out cleanly. I'd like to load bmp images and display them on the screen as is (no interpolation, no magnification, no filtering or anti-aliasing, etc).
I'm sure I'm missing something, but when I try and render a 100x100 bmp to the screen, it looks choppy and distorted, like a pixel art image would normally look when shrunken slightly. I want the bmp to look exactly as it does when loaded in MS Paint.
Does anyone have any idea why this might be the case? My code is shown below:
Initialization code:
g_DxCom = Direct3DCreate9( D3D_SDK_VERSION );
if ( g_DxCom == NULL )
{
return false;
}
D3DDISPLAYMODE d3dDisplayMode;
if ( FAILED( g_DxCom->GetAdapterDisplayMode( D3DADAPTER_DEFAULT, &d3dDisplayMode ) ) )
{
return false;
}
D3DPRESENT_PARAMETERS d3dPresentParameters;
::ZeroMemory( &d3dPresentParameters, sizeof(D3DPRESENT_PARAMETERS) );
d3dPresentParameters.Windowed = FALSE;
d3dPresentParameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dPresentParameters.BackBufferFormat = d3dDisplayMode.Format; // D3DFMT_X8R8G8B8
d3dPresentParameters.BackBufferWidth = d3dDisplayMode.Width;
d3dPresentParameters.BackBufferHeight = d3dDisplayMode.Height;
d3dPresentParameters.PresentationInterval = D3DPRESENT_INTERVAL_ONE;
if ( FAILED( g_DxCom->CreateDevice( D3DADAPTER_DEFAULT,
D3DDEVTYPE_HAL,
this->hWnd,
D3DCREATE_HARDWARE_VERTEXPROCESSING,
&d3dPresentParameters,
&pd3dDevice ) ) )
{
if ( FAILED( g_DxCom->CreateDevice( D3DADAPTER_DEFAULT,
D3DDEVTYPE_HAL,
this->hWnd,
D3DCREATE_SOFTWARE_VERTEXPROCESSING,
&d3dPresentParameters,
&pd3dDevice ) ) )
{
return false;
}
}
texture = NULL;
bg_texture = NULL;
Render code:
LPDIRECT3DDEVICE9 g_dxDevice;
float float1 = 99.5f; // I'd like to render my 100x100 sprite from screen coordinates 100, 100 to 200, 200
float float2 = 198.5f;
CUSTOMVERTEX OurVertices[] =
{
{ float1, float2, 1.0f, 1.0f, 0.0f, 1.0f },
{ float1, float1, 1.0f, 1.0f, 0.0f, 0.0f },
{ float2, float1, 1.0f, 1.0f, 1.0f, 0.0f },
{ float1, float2, 1.0f, 1.0f, 0.0f, 1.0f },
{ float2, float1, 1.0f, 1.0f, 1.0f, 0.0f },
{ float2, float2, 1.0f, 1.0f, 1.0f, 1.0f }
};
LPDIRECT3DVERTEXBUFFER9 v_buffer;
g_dxDevice->CreateVertexBuffer( 6 * sizeof(CUSTOMVERTEX),
0,
CUSTOMFVF,
D3DPOOL_MANAGED,
&v_buffer,
NULL );
VOID* pVoid;
// Lock the vertex buffer into memory
v_buffer->Lock( 0, 0, &pVoid, 0 );
// Copy our vertex buffer to memory
::memcpy( pVoid, OurVertices, sizeof(OurVertices) );
// Unlock buffer
v_buffer->Unlock();
LPDIRECT3DTEXTURE9 g_texture;
HRESULT hError;
DWORD dwTextureFilter = D3DTEXF_NONE;
g_dxDevice->SetSamplerState( 0, D3DSAMP_MINFILTER, dwTextureFilter );
g_dxDevice->SetSamplerState( 0, D3DSAMP_MAGFILTER, dwTextureFilter );
g_dxDevice->SetSamplerState( 0, D3DSAMP_MIPFILTER, dwTextureFilter );
g_dxDevice->SetTextureStageState(0,D3DTSS_COLOROP,D3DTOP_SELECTARG1);
g_dxDevice->SetTextureStageState(0,D3DTSS_COLORARG1,D3DTA_TEXTURE);
g_dxDevice->SetTextureStageState(0,D3DTSS_COLORARG2,D3DTA_DIFFUSE);
hError = D3DXCreateTextureFromFile( g_dxDevice, L"Test.bmp", &g_texture ); // 100x100 sprite
g_dxDevice->SetTexture( 0, g_texture );
g_dxDevice->Clear( 0,
NULL,
D3DCLEAR_TARGET,
D3DCOLOR_XRGB( 0, 40, 100 ),
1.0f,
0 );
g_dxDevice->BeginScene();
// Do rendering on the back buffer here
g_dxDevice->SetFVF( CUSTOMFVF );
g_dxDevice->SetStreamSource( 0, v_buffer, 0, sizeof(CUSTOMVERTEX) );
g_dxDevice->DrawPrimitive( D3DPT_TRIANGLELIST, 0, 2 ); // 6 vertices = 2 triangles (primitive count, not vertex count)
g_dxDevice->EndScene();
g_dxDevice->Present( NULL, NULL, NULL, NULL );
g_texture->Release();
v_buffer->Release();
Okay, so I've finally figured it out, and I should have known this was the case.
It looks like DirectX 9 only works cleanly with textures whose sizes are powers of two. If I change the texture so that the sprite square is 128 x 128 (just adding some transparency) and run the application with float2 changed appropriately, there is no distortion in the rendered image.
Hurrah...
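For what it's worth, a hedged alternative that avoids the padding trick: on hardware that requires power-of-two textures, D3DXCreateTextureFromFile rounds the image up to power-of-two dimensions (resampling it in the process), but D3DXCreateTextureFromFileEx lets you request the file's exact size and disable filtering, provided the device supports non-power-of-two textures. A sketch using the names from the code above:
LPDIRECT3DTEXTURE9 g_texture = NULL;
HRESULT hError = D3DXCreateTextureFromFileEx(
    g_dxDevice, L"Test.bmp",
    D3DX_DEFAULT_NONPOW2, D3DX_DEFAULT_NONPOW2, // keep the file's own 100x100 size
    1,                  // a single mip level
    0,                  // no special usage
    D3DFMT_UNKNOWN,     // take the pixel format from the file
    D3DPOOL_MANAGED,
    D3DX_FILTER_NONE,   // no rescaling filter
    D3DX_FILTER_NONE,   // no mip filter
    0, NULL, NULL,      // no color key, source info, or palette
    &g_texture );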

iOS 5.1 OpenGL ES 2.0 - Texture Clamping issue

I'm simply trying to repeat the texture, loaded using the iOS 5.1 GLKit improvements, that I've drawn onto a simple quad.
Here are the vertices and texCoords:
#define ROAD_TEX_COORD_MAX 10
const Vertex roadVertices[] =
{
{ { 90, -90, 0 } , { 1, 1, 1, 1 }, { ROAD_TEX_COORD_MAX, 0 } }, // Bot R - 0
{ { 90, 90, 0 } , { 1, 1, 1, 1 }, { ROAD_TEX_COORD_MAX, ROAD_TEX_COORD_MAX } }, // Top R - 1
{ { -90, 90, 0 } , { 1, 1, 1, 1 }, { 0 , ROAD_TEX_COORD_MAX } }, // Top L - 2
{ { -90, -90, 0 } , { 1, 1, 1, 1 }, { 0 , 0 } } // Bot L - 3
};
const GLubyte roadIndices[] =
{
0, 1, 2,
2, 3, 0
};
Here is the render routine:
- ( void ) render
{
[self.baseEffect prepareToDraw];
glBindBuffer( GL_ARRAY_BUFFER, vertexBuffer );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, indexBuffer );
glEnableVertexAttribArray( GLKVertexAttribPosition );
glVertexAttribPointer( GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, sizeof( Vertex ), ( const GLvoid * ) offsetof( Vertex, Position ) );
glEnableVertexAttribArray( GLKVertexAttribColor );
glVertexAttribPointer( GLKVertexAttribColor, 4, GL_FLOAT, GL_FALSE, sizeof( Vertex ), ( const GLvoid * ) offsetof( Vertex, Color ) );
glEnableVertexAttribArray( GLKVertexAttribTexCoord0 );
glVertexAttribPointer( GLKVertexAttribTexCoord0, 2, GL_FLOAT, GL_FALSE, sizeof( Vertex ), ( const GLvoid * ) offsetof( Vertex, TexCoord ) );
glDrawElements( GL_TRIANGLES, sizeof( roadIndices ) / sizeof( roadIndices[0] ), GL_UNSIGNED_BYTE, 0 );
}
However, this is what I'm ending up with:
Now I've read that by default the EAGL setup uses the REPEAT texture setting and that you have to manually set it to CLAMP_TO_EDGE. However, that doesn't seem to be the case here: I'm not setting either of these. (I have tried disabling the clamp and enabling the repeat, but all that did was make the quad black, with no texture visible.)
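One thing worth checking, as a hedged sketch: OpenGL ES 2.0 only supports GL_REPEAT on textures whose width and height are both powers of two; non-power-of-two textures are limited to GL_CLAMP_TO_EDGE. Assuming roadTextureInfo (hypothetical name) is the GLKTextureInfo returned by GLKTextureLoader, the wrap mode can be set explicitly after loading:
glBindTexture( GL_TEXTURE_2D, roadTextureInfo.name );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );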

Retrieving the depth buffer of a render for another render pass (D3DX9)

I am rendering a scene to a texture, then using that texture and its depth buffer in subsequent renderings to apply some effects (Depth of Field, Bloom, etc...).
Here is the problem: I manage to set the depth-stencil buffer to a custom surface and render my scene to a texture. The scene is rendered perfectly, and there are no depth problems. That said, when sampling the depth texture (the one the custom surface is bound to), all depth values are exactly 1.0.
I suspected the calls to device->Clear of affecting the wrong surface, but even disabling them doesn't fix anything.
Here is the actual code:
HRESULT hr = S_FALSE;
VCND3D* renderer = static_cast<VCND3D*>( VCNRenderCore::GetInstance() );
LPDIRECT3DDEVICE9 device = renderer->GetD3DDevice();
//device->EndScene();
VCNDXShaderCore* shaderCore = static_cast<VCNDXShaderCore*>( VCNDXShaderCore::GetInstance() );
VCNNode* rootNode = VCNNodeCore::GetInstance()->GetRootNode();
LPDIRECT3DSURFACE9 currentSurface;
device->GetRenderTarget(0, &currentSurface);
LPDIRECT3DSURFACE9 currentDepthBuffer;
device->GetDepthStencilSurface( &currentDepthBuffer );
///// INITIAL HDR RENDER : //////////////////////////////////////
hr = device->SetRenderTarget( 0, mInitialHDRSurface );
VCN_ASSERT( SUCCEEDED(hr) );
hr = device->SetDepthStencilSurface( mDepthSurface );
VCN_ASSERT( SUCCEEDED(hr) );
device->Clear( 0,
NULL,
D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
D3DCOLOR_XRGB(100,100,100),
1.0f,
0 );
//device->BeginScene();
rootNode->Render();
//device->EndScene();
//hr = device->UpdateSurface( currentDepthBuffer, NULL, mDepthSurface, NULL );
//VCN_ASSERT( SUCCEEDED(hr) );
//////////// DOF EFFECT : /////////////////////////////////////////////////
hr = device->SetRenderTarget( 0, currentSurface );
VCN_ASSERT( SUCCEEDED(hr) );
device->SetRenderState(D3DRS_COLORWRITEENABLE,
D3DCOLORWRITEENABLE_ALPHA | D3DCOLORWRITEENABLE_RED | D3DCOLORWRITEENABLE_GREEN | D3DCOLORWRITEENABLE_BLUE);
device->Clear( 0,
NULL,
D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
D3DCOLOR_XRGB(100,100,100),
1.0f,
0 );
//device->BeginScene();
// Set DOF shader
VCNDXShader* dofShader = shaderCore->GetShader( sidDof );
// Field of view to cover full screen :
hr = device->SetFVF(D3DFVF_SCREEN);
VCN_ASSERT( SUCCEEDED(hr) );
// select the vertex buffer to display
hr = device->SetStreamSource(0, mScreenVertexBuffer, 0, sizeof(SCREENVERTEX));
VCN_ASSERT( SUCCEEDED(hr) );
// Draw the Dof result
hr = dofShader->GetEffect()->SetTechnique( "BaseTechnique" );
VCN_ASSERT( SUCCEEDED(hr) );
hr = dofShader->GetEffect()->SetTexture( "gInputImageTexture", mInitialHDRTexture );
VCN_ASSERT( SUCCEEDED(hr) );
hr = dofShader->GetEffect()->SetTexture( "gDepthTexture", mDepthTexture );
VCN_ASSERT( SUCCEEDED(hr) );
hr = dofShader->GetEffect()->SetFloat( "gMinDepth", 3.0f );
VCN_ASSERT( SUCCEEDED(hr) );
hr = dofShader->GetEffect()->SetFloat( "gMaxDepth", 7.0f );
VCN_ASSERT( SUCCEEDED(hr) );
dofShader->RenderPrimitive( 2, D3DPT_TRIANGLESTRIP );
hr = device->SetDepthStencilSurface( currentDepthBuffer );
VCN_ASSERT( SUCCEEDED(hr) );
And here are the init of the texture and surfaces, because the error could be there :
// Texture for the initial HDR Render (Before effects)
hr = device->CreateTexture(res.x, res.y, 1, D3DUSAGE_RENDERTARGET, D3DFMT_A32B32G32R32F, D3DPOOL_DEFAULT, &mInitialHDRTexture, 0);
VCN_ASSERT_MSG( SUCCEEDED(hr), _T("Unable to create initial HDR Render Texture") );
// Extract the associated surface for rendering
hr = mInitialHDRTexture->GetSurfaceLevel(0, &mInitialHDRSurface);
VCN_ASSERT( SUCCEEDED(hr) );
// Texture with depth of the scene
hr = device->CreateTexture(res.x, res.y, 1, D3DUSAGE_DEPTHSTENCIL, D3DFMT_D32F_LOCKABLE, D3DPOOL_DEFAULT, &mDepthTexture, 0);
VCN_ASSERT_MSG( SUCCEEDED(hr), _T("Unable to create depth texture") );
// Extract the associated surface for rendering
hr = mDepthTexture->GetSurfaceLevel(0, &mDepthSurface);
VCN_ASSERT( SUCCEEDED(hr) );
Note that no ASSERT is triggered (which is exactly what is pissing me off; I would like some errors as an indication of where to look).
Nic
If I read your code correctly, you clear mDepthSurface twice: first in the INITIAL HDR RENDER section, then in the DOF EFFECT section. You either need to call SetDepthStencilSurface before the second Clear, or not set D3DCLEAR_ZBUFFER in that call. Otherwise the second Clear discards whatever was rendered by rootNode->Render(), and at the start of the effect pass you have a z-buffer cleared entirely to 1.0.
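A minimal sketch of the first option, using the surfaces from the code above: restore the original depth-stencil surface before the second Clear, so that mDepthSurface keeps the depths written by rootNode->Render():
// DOF EFFECT: put the main depth buffer back before clearing, so the
// depths in mDepthSurface survive to be sampled through mDepthTexture
hr = device->SetRenderTarget( 0, currentSurface );
VCN_ASSERT( SUCCEEDED(hr) );
hr = device->SetDepthStencilSurface( currentDepthBuffer );
VCN_ASSERT( SUCCEEDED(hr) );
device->Clear( 0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
               D3DCOLOR_XRGB(100,100,100), 1.0f, 0 );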

DirectX 10 Primitive is not displayed

I am trying to write my first DirectX 10 program that displays a triangle. Everything compiles fine, and the render function is called, since the background changes to black. However, the triangle I'm trying to draw with a triangle strip primitive is not displayed at all.
The Initialization function:
bool InitDirect3D(HWND hWnd, int width, int height)
{
//****** D3DDevice and SwapChain *****//
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(swapChainDesc));
swapChainDesc.BufferCount = 1;
swapChainDesc.BufferDesc.Width = width;
swapChainDesc.BufferDesc.Height = height;
swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swapChainDesc.BufferDesc.RefreshRate.Numerator = 60;
swapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.OutputWindow = hWnd;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.Windowed = TRUE;
if (FAILED(D3D10CreateDeviceAndSwapChain( NULL,
D3D10_DRIVER_TYPE_HARDWARE,
NULL,
0,
D3D10_SDK_VERSION,
&swapChainDesc,
&pSwapChain,
&pD3DDevice)))
return fatalError(TEXT("Hardware does not support DirectX 10!"));
//***** Shader *****//
if (FAILED(D3DX10CreateEffectFromFile( TEXT("basicEffect.fx"),
NULL, NULL,
"fx_4_0",
D3D10_SHADER_ENABLE_STRICTNESS,
0,
pD3DDevice,
NULL,
NULL,
&pBasicEffect,
NULL,
NULL)))
return fatalError(TEXT("Could not load effect file!"));
pBasicTechnique = pBasicEffect->GetTechniqueByName("Render");
pViewMatrixEffectVariable = pBasicEffect->GetVariableByName( "View" )->AsMatrix();
pProjectionMatrixEffectVariable = pBasicEffect->GetVariableByName( "Projection" )->AsMatrix();
pWorldMatrixEffectVariable = pBasicEffect->GetVariableByName( "World" )->AsMatrix();
//***** Input Assembly Stage *****//
D3D10_INPUT_ELEMENT_DESC layout[] =
{
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D10_INPUT_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D10_INPUT_PER_VERTEX_DATA, 0}
};
UINT numElements = 2;
D3D10_PASS_DESC PassDesc;
pBasicTechnique->GetPassByIndex(0)->GetDesc(&PassDesc);
if (FAILED( pD3DDevice->CreateInputLayout( layout,
numElements,
PassDesc.pIAInputSignature,
PassDesc.IAInputSignatureSize,
&pVertexLayout)))
return fatalError(TEXT("Could not create Input Layout."));
pD3DDevice->IASetInputLayout( pVertexLayout );
//***** Vertex buffer *****//
UINT numVertices = 100;
D3D10_BUFFER_DESC bd;
bd.Usage = D3D10_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(vertex) * numVertices;
bd.BindFlags = D3D10_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
bd.MiscFlags = 0;
if (FAILED(pD3DDevice->CreateBuffer(&bd, NULL, &pVertexBuffer)))
return fatalError(TEXT("Could not create vertex buffer!"));;
UINT stride = sizeof(vertex);
UINT offset = 0;
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
//***** Rasterizer *****//
// Set the viewport
viewPort.Width = width;
viewPort.Height = height;
viewPort.MinDepth = 0.0f;
viewPort.MaxDepth = 1.0f;
viewPort.TopLeftX = 0;
viewPort.TopLeftY = 0;
pD3DDevice->RSSetViewports(1, &viewPort);
D3D10_RASTERIZER_DESC rasterizerState;
rasterizerState.CullMode = D3D10_CULL_NONE;
rasterizerState.FillMode = D3D10_FILL_SOLID;
rasterizerState.FrontCounterClockwise = true;
rasterizerState.DepthBias = 0; // DepthBias is an INT, not a BOOL
rasterizerState.DepthBiasClamp = 0;
rasterizerState.SlopeScaledDepthBias = 0;
rasterizerState.DepthClipEnable = true;
rasterizerState.ScissorEnable = false;
rasterizerState.MultisampleEnable = false;
rasterizerState.AntialiasedLineEnable = true;
ID3D10RasterizerState* pRS;
pD3DDevice->CreateRasterizerState(&rasterizerState, &pRS);
pD3DDevice->RSSetState(pRS);
//***** Output Merger *****//
// Get the back buffer from the swapchain
ID3D10Texture2D *pBackBuffer;
if (FAILED(pSwapChain->GetBuffer(0, __uuidof(ID3D10Texture2D), (LPVOID*)&pBackBuffer)))
return fatalError(TEXT("Could not get back buffer."));
// create the render target view
if (FAILED(pD3DDevice->CreateRenderTargetView(pBackBuffer, NULL, &pRenderTargetView)))
return fatalError(TEXT("Could not create the render target view."));
// release the back buffer
pBackBuffer->Release();
// set the render target
pD3DDevice->OMSetRenderTargets(1, &pRenderTargetView, NULL);
return true;
}
The render function:
void Render()
{
if (pD3DDevice != NULL)
{
pD3DDevice->ClearRenderTargetView(pRenderTargetView, D3DXCOLOR(0.0f, 0.0f, 0.0f, 0.0f));
//create world matrix
static float r;
D3DXMATRIX w;
D3DXMatrixIdentity(&w);
D3DXMatrixRotationY(&w, r);
r += 0.001f;
//set effect matrices
pWorldMatrixEffectVariable->SetMatrix(w);
pViewMatrixEffectVariable->SetMatrix(viewMatrix);
pProjectionMatrixEffectVariable->SetMatrix(projectionMatrix);
//fill vertex buffer with vertices
UINT numVertices = 3;
vertex* v = NULL;
//lock vertex buffer for CPU use
pVertexBuffer->Map(D3D10_MAP_WRITE_DISCARD, 0, (void**) &v );
v[0] = vertex( D3DXVECTOR3(-1,-1,0), D3DXVECTOR4(1,0,0,1) );
v[1] = vertex( D3DXVECTOR3(0,1,0), D3DXVECTOR4(0,1,0,1) );
v[2] = vertex( D3DXVECTOR3(1,-1,0), D3DXVECTOR4(0,0,1,1) );
pVertexBuffer->Unmap();
// Set primitive topology
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
//get technique desc
D3D10_TECHNIQUE_DESC techDesc;
pBasicTechnique->GetDesc(&techDesc);
for(UINT p = 0; p < techDesc.Passes; ++p)
{
//apply technique
pBasicTechnique->GetPassByIndex(p)->Apply(0);
//draw
pD3DDevice->Draw(numVertices, 0);
}
pSwapChain->Present(0,0);
}
}
I'm not sure, but try setting:
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
after you unmap the buffer, to get something like this:
pVertexBuffer->Unmap();
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
// Set primitive topology
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
I suspect that locking blows away the buffer binding.
