Seems like my Pixel Shader is not being called - directx

I've been messing around with DirectX and working on this weird idea of mine for some time now. It seems as if my pixel shader is not being invoked for some odd reason. I don't know if that's even possible or whether it's some stupid mistake of mine. The code is as follows (I have copied most of it from the tutorials).
Shader (HLSL) code is as follows:
/*
********************************************************************************************************
* Constant Buffers (Can only change per render call).
********************************************************************************************************
*/
cbuffer SConstantBuffer : register( b0 )
{
float3 origin;
float3 direction;
float planeDistance;
int screenWidth;
int screenHeight;
};
/*
********************************************************************************************************
* Input and Output Structures
********************************************************************************************************
*/
struct VS_INPUT
{
float3 position : POSITION;
float4 color : COLOR;
};
struct VS_OUTPUT
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
/*
********************************************************************************************************
* Vertex Shader main
********************************************************************************************************
*/
VS_OUTPUT VS( VS_INPUT input )
{
VS_OUTPUT output = (VS_OUTPUT)0;
float param_d;
float3 pixelPoint;
float3 zAxis = { 0.0, 0.0, 1.0 };
// Point on the view plane, planeDistance units along the view direction.
float3 planeOrigin = origin + (direction * planeDistance);
// Normalized ray direction from the eye (origin) through the input vertex.
float3 pointDirection = (input.position - origin) / length( (input.position - origin) );
// Ray/plane intersection: parametric distance along the ray, then the intersection point itself.
param_d = dot( (planeOrigin - origin), direction ) / dot( pointDirection, direction );
pixelPoint = ( pointDirection * param_d ) + origin;
// Build the output position from the cross product of that point with the view direction.
output.position.xyz = cross ( pixelPoint, direction );
output.position.x = output.position.x * 5.0;
output.position.y = output.position.y * 5.0;
output.position.z = 20.0;
output.position.w = output.position.z;
output.color = input.color;
return output;
}
/*
********************************************************************************************************
* Pixel Shader main
********************************************************************************************************
*/
float4 PS( VS_OUTPUT input ) : SV_Target
{
return float4( 1.0, 1.0, 1.0, 1.0); //input.color;
}
DirectX init and rendering code is as follows:
#include "Renderer.h"
CRenderer::CRenderer ( )
{
this->m_dxDevice = NULL;
this->m_dxDeviceContext = NULL;
this->m_dxRenderTargetView = NULL;
this->m_dxSwapChain = NULL;
this->m_dxBackBuffer = NULL;
this->m_dxVertexLayout = NULL;
this->m_dxVertexBuffer = NULL;
this->m_dxPixelShader = NULL;
this->m_dxConstantBuffer = NULL;
}
HRESULT CompileShaderFromFile( WCHAR* szFileName, LPCSTR szEntryPoint, LPCSTR szShaderModel, ID3DBlob** ppBlobOut )
{
HRESULT hr = S_OK;
DWORD dwShaderFlags = D3DCOMPILE_ENABLE_STRICTNESS;
#if defined( DEBUG ) || defined( _DEBUG )
// Set the D3DCOMPILE_DEBUG flag to embed debug information in the shaders.
// Setting this flag improves the shader debugging experience, but still allows
// the shaders to be optimized and to run exactly the way they will run in
// the release configuration of this program.
dwShaderFlags |= D3DCOMPILE_DEBUG;
#endif
ID3DBlob* pErrorBlob = NULL;
hr = D3DX11CompileFromFile( szFileName, NULL, NULL, szEntryPoint, szShaderModel,
dwShaderFlags, 0, NULL, ppBlobOut, &pErrorBlob, NULL );
if( FAILED(hr) )
{
if( pErrorBlob != NULL )
OutputDebugStringA( (char*)pErrorBlob->GetBufferPointer() );
if( pErrorBlob ) pErrorBlob->Release();
return hr;
}
if( pErrorBlob ) pErrorBlob->Release();
return S_OK;
}
HRESULT CRenderer::InitDevice ( HWND arg_hWnd )
{
HRESULT status = S_OK;
this->m_hWnd = arg_hWnd;
RECT rctWindowSize;
GetClientRect( arg_hWnd, &rctWindowSize );
UINT dWidth = rctWindowSize.right - rctWindowSize.left;
UINT dHeight = rctWindowSize.bottom - rctWindowSize.top;
UINT dCreateDeviceFlag = 0;
#ifdef _DEBUG
dCreateDeviceFlag |= D3D11_CREATE_DEVICE_DEBUG;
#endif // _DEBUG
D3D_DRIVER_TYPE pDriverType [] =
{
D3D_DRIVER_TYPE_HARDWARE,
D3D_DRIVER_TYPE_WARP,
D3D_DRIVER_TYPE_REFERENCE
};
UINT numDriverType = ARRAYSIZE(pDriverType);
D3D_FEATURE_LEVEL pFeatureLevel [] =
{
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0
};
UINT numFeatureLevel = ARRAYSIZE(pFeatureLevel);
//Fill out the swap chain descriptor
DXGI_SWAP_CHAIN_DESC objSwapChainDesc;
ZeroMemory(&objSwapChainDesc, sizeof(objSwapChainDesc));
objSwapChainDesc.BufferCount = 1;
objSwapChainDesc.BufferDesc.Width = dWidth;
objSwapChainDesc.BufferDesc.Height = dHeight;
objSwapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
objSwapChainDesc.BufferDesc.RefreshRate.Numerator = 60;
objSwapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
objSwapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
objSwapChainDesc.OutputWindow = this->m_hWnd;
objSwapChainDesc.SampleDesc.Count = 1;
objSwapChainDesc.SampleDesc.Quality = 0;
objSwapChainDesc.Windowed = true;
//Parse and create the appropriate type of device
for ( UINT driverTypeIndex = 0; driverTypeIndex < numDriverType; driverTypeIndex++ )
{
this->m_objDriverType = pDriverType[driverTypeIndex];
status = D3D11CreateDeviceAndSwapChain ( NULL,
this->m_objDriverType,
NULL,
dCreateDeviceFlag,
pFeatureLevel,
numFeatureLevel,
D3D11_SDK_VERSION,
&objSwapChainDesc,
&(this->m_dxSwapChain),
&(this->m_dxDevice),
&(this->m_objFeatureLevel),
&(this->m_dxDeviceContext) );
if ( SUCCEEDED(status) )
break;
}
if ( FAILED(status) )
return status;
//Create a render target
this->m_dxBackBuffer = NULL;
status = this->m_dxSwapChain->GetBuffer ( 0, __uuidof(ID3D11Texture2D), (LPVOID*)(&this->m_dxBackBuffer) );
if ( FAILED(status) )
return status;
status = this->m_dxDevice->CreateRenderTargetView ( this->m_dxBackBuffer, NULL, &(this->m_dxRenderTargetView) );
if ( FAILED(status) )
return status;
//Create depth stencil
D3D11_TEXTURE2D_DESC objDepthStencilDesc;
ZeroMemory(&objDepthStencilDesc, sizeof(objDepthStencilDesc));
D3D11_DEPTH_STENCIL_VIEW_DESC objDepthStencilViewDesc;
ZeroMemory(&objDepthStencilViewDesc, sizeof(objDepthStencilViewDesc));
objDepthStencilDesc.Width = dWidth;
objDepthStencilDesc.Height = dHeight;
objDepthStencilDesc.MipLevels = 1;
objDepthStencilDesc.ArraySize = 1;
objDepthStencilDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
objDepthStencilDesc.SampleDesc.Count = 1;
objDepthStencilDesc.SampleDesc.Quality = 0;
objDepthStencilDesc.Usage = D3D11_USAGE_DEFAULT;
objDepthStencilDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
objDepthStencilDesc.MiscFlags = 0;
objDepthStencilDesc.CPUAccessFlags = 0;
status = this->m_dxDevice->CreateTexture2D (
&objDepthStencilDesc,
NULL,
&(this->m_dxDepthStencilBuffer)
);
if ( FAILED(status) )
return status;
objDepthStencilViewDesc.Format = objDepthStencilDesc.Format;
objDepthStencilViewDesc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
objDepthStencilViewDesc.Texture2D.MipSlice = 0;
status = this->m_dxDevice->CreateDepthStencilView (
this->m_dxDepthStencilBuffer,
&objDepthStencilViewDesc,
&(this->m_dxDepthStencilView)
);
if ( FAILED(status) )
return status;
this->m_dxDeviceContext->OMSetRenderTargets ( 1, &(this->m_dxRenderTargetView), this->m_dxDepthStencilView );
//Set viewport (can be commented out)
this->m_objViewport.Width = (FLOAT)dWidth;
this->m_objViewport.Height = (FLOAT)dHeight;
this->m_objViewport.MinDepth = 0.0;
this->m_objViewport.MaxDepth = 1.0;
this->m_objViewport.TopLeftX = 0;
this->m_objViewport.TopLeftY = 0;
this->m_dxDeviceContext->RSSetViewports ( 1, &(this->m_objViewport) );
//Setting up Shaders
//Compiling Vertex Shader file
ID3DBlob * dxVSBlob = NULL;
status = CompileShaderFromFile ( L"RTShaders.fx", "VS", "vs_4_0", &dxVSBlob );
if (FAILED (status))
{
dxVSBlob->Release ( );
return status;
}
//Creating Compiled Shader
status = this->m_dxDevice->CreateVertexShader ( dxVSBlob->GetBufferPointer ( ),
dxVSBlob->GetBufferSize ( ),
NULL,
&(this->m_dxVertexShader) );
if( FAILED(status) )
{
dxVSBlob->Release ( );
return status;
}
//Describe Layout
D3D11_INPUT_ELEMENT_DESC layout [] =
{
{
"POSITION",
0,
DXGI_FORMAT_R32G32B32_FLOAT,
0,
0,
D3D11_INPUT_PER_VERTEX_DATA,
0
},
/* {
"NORMAL",
0,
DXGI_FORMAT_R32G32B32A32_FLOAT,
0,
16,
D3D11_INPUT_PER_VERTEX_DATA,
0
},
*/ {
"COLOR",
0,
DXGI_FORMAT_R32G32B32_FLOAT,
0,
12, //NEED TO CHANGE THIS TO 32 WHEN UNCOMMENTING THE ABOVE BLOCK
D3D11_INPUT_PER_VERTEX_DATA,
0
},
};
UINT numElements = ARRAYSIZE(layout);
//Create Layout
status = this->m_dxDevice->CreateInputLayout ( layout,
numElements,
dxVSBlob->GetBufferPointer ( ),
dxVSBlob->GetBufferSize ( ),
&(this->m_dxVertexLayout) );
dxVSBlob->Release ( );
dxVSBlob = NULL;
if ( FAILED(status) )
{
return status;
}
//Set Vertex Layout to the pipeline
this->m_dxDeviceContext->IASetInputLayout ( this->m_dxVertexLayout );
//Compiling Pixel Shader File
ID3DBlob * dxPSBlob = NULL;
status = CompileShaderFromFile ( L"RTShaders.fx", "PS", "ps_4_0", &dxPSBlob );
if ( FAILED(status) )
{
if ( dxPSBlob )
dxPSBlob->Release ( );
return status;
}
//Creating Compiled Pixel Shader
status = this->m_dxDevice->CreatePixelShader (
dxPSBlob->GetBufferPointer ( ),
dxPSBlob->GetBufferSize ( ),
NULL,
&(this->m_dxPixelShader)
);
dxPSBlob->Release ( );
dxPSBlob = NULL;
if ( FAILED(status) )
{
return status;
}
return S_OK;
}
HRESULT CRenderer::CreateVertexBuffer ( )
{
HRESULT status = S_OK;
D3D11_BUFFER_DESC objBufferDesc;
D3D11_SUBRESOURCE_DATA objSubresData;
UINT dStride = sizeof (SVertex);
UINT dOffset = 0;
ZeroMemory(&objBufferDesc, sizeof(objBufferDesc));
ZeroMemory(&objSubresData, sizeof(objSubresData));
objBufferDesc.ByteWidth = sizeof(SVertex) * 1681;
objBufferDesc.Usage = D3D11_USAGE_DEFAULT;
objBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
objBufferDesc.CPUAccessFlags = 0;
objSubresData.pSysMem = pVertexData;
status = this->m_dxDevice->CreateBuffer (
&objBufferDesc,
&objSubresData,
&(this->m_dxVertexBuffer)
);
if ( FAILED(status) )
return status;
this->m_dxDeviceContext->IASetVertexBuffers ( 0, 1, &(this->m_dxVertexBuffer), &dStride, &dOffset );
this->m_dxDeviceContext->IASetPrimitiveTopology ( D3D11_PRIMITIVE_TOPOLOGY_POINTLIST );
return S_OK;
}
HRESULT CRenderer::CreateConstantBuffer ( )
{
HRESULT status = S_OK;
D3D11_BUFFER_DESC objBufferDesc;
ZeroMemory(&objBufferDesc, sizeof(objBufferDesc));
objBufferDesc.ByteWidth = sizeof(SConstantBuffer) + (16 - sizeof(SConstantBuffer)%16);
objBufferDesc.Usage = D3D11_USAGE_DEFAULT;
objBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
objBufferDesc.CPUAccessFlags = 0;
status = this->m_dxDevice->CreateBuffer (
&objBufferDesc,
NULL,
&(this->m_dxConstantBuffer)
);
return status;
}
HRESULT CRenderer::Render ( )
{
HRESULT status = S_OK;
RECT screenRect;
SConstantBuffer objConstBuffer;
float pClearColor [4] = { 0.0f, 0.125f, 0.5f, 1.0f };
this->m_dxDeviceContext->ClearRenderTargetView ( this->m_dxRenderTargetView, pClearColor );
this->m_dxDeviceContext->ClearDepthStencilView ( this->m_dxDepthStencilView, D3D11_CLEAR_DEPTH, 1.0f, 0 );
GetClientRect ( this->m_hWnd, &screenRect );
objConstBuffer.f4Origin = XMFLOAT3 ( 0.0f, 0.0f, 0.0f );
objConstBuffer.f4Direction = XMFLOAT3 ( 0.0f, 0.0f, 1.0f );
objConstBuffer.fDistanceFromPlane = 2.0f;
objConstBuffer.dScreenWidth = screenRect.right - screenRect.left;
objConstBuffer.dScreenHeight = screenRect.bottom - screenRect.top;
this->m_dxDeviceContext->UpdateSubresource ( this->m_dxConstantBuffer, 0, NULL, &objConstBuffer, 0, 0 );
this->m_dxDeviceContext->VSSetShader ( this->m_dxVertexShader, NULL, 0 );
this->m_dxDeviceContext->VSSetConstantBuffers ( 0, 1, &(this->m_dxConstantBuffer) );
this->m_dxDeviceContext->PSSetShader ( this->m_dxPixelShader, NULL, 0 );
this->m_dxDeviceContext->Draw ( 1680, 0 );
this->m_dxSwapChain->Present ( 0, 0 );
return status;
}
CRenderer::~CRenderer ( )
{
if ( this->m_dxDeviceContext )
this->m_dxDeviceContext->ClearState ( );
if ( this->m_dxPixelShader )
this->m_dxPixelShader->Release ( );
this->m_dxPixelShader = NULL;
if ( this->m_dxVertexLayout )
this->m_dxVertexLayout->Release ( );
this->m_dxVertexLayout = NULL;
if ( this->m_dxVertexBuffer )
this->m_dxVertexBuffer->Release ( );
this->m_dxVertexBuffer = NULL;
if ( this->m_dxDepthStencilBuffer )
this->m_dxDepthStencilBuffer->Release ( );
this->m_dxDepthStencilBuffer = NULL;
if ( this->m_dxDepthStencilView )
this->m_dxDepthStencilView->Release ( );
this->m_dxDepthStencilView = NULL;
if ( this->m_dxRenderTargetView )
this->m_dxRenderTargetView->Release ( );
this->m_dxRenderTargetView = NULL;
if ( this->m_dxBackBuffer )
this->m_dxBackBuffer->Release ( );
this->m_dxBackBuffer = NULL;
if ( this->m_dxSwapChain )
this->m_dxSwapChain->Release ( );
this->m_dxSwapChain = NULL;
if ( this->m_dxDeviceContext )
this->m_dxDeviceContext->Release ( );
this->m_dxDeviceContext = NULL;
if ( this->m_dxDevice )
this->m_dxDevice->Release ( );
this->m_dxDevice = NULL;
}
Nothing gets rendered on the screen. I should see white dots or a big white patch, but all I see is the cleared render target. I've been running PIX and the pre-VS and post-VS transformations seem fine.
Please help!
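A generic way to isolate this kind of problem (a debugging sketch only, not part of the project code) is to temporarily bypass the projection math in the vertex shader and emit a fixed clip-space position; if a white point still does not appear, the problem lies after the vertex stage (rasterizer state, viewport, depth test, or render-target binding) rather than in the VS math:
// Debugging sketch (assumption: same VS_INPUT/VS_OUTPUT structures as above).
// Every point collapses onto the centre of the viewport, so any visible white
// pixel proves the pixel shader is actually being invoked.
VS_OUTPUT VS_Debug( VS_INPUT input )
{
VS_OUTPUT output = (VS_OUTPUT)0;
output.position = float4( 0.0, 0.0, 0.5, 1.0 );
output.color = float4( 1.0, 1.0, 1.0, 1.0 );
return output;
}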

Related

DirectX 11 Render to Texture Issue

In my code, I am making 2 rectangles:
Rectangle 1: rendered to a texture.
Rectangle 2: rendered to the back buffer.
I am trying to do programmable blending, so I need to access the destination pixel in the pixel shader.
In my code, I am creating a texture like below:
d3d11Device->CreateTexture2D(&textureDesc, NULL, &renderTargetTextureMap);
After this I am creating a render target view of the texture:
d3d11Device->CreateRenderTargetView(renderTargetTextureMap, &renderTargetViewDesc, &renderTargetViewMap);
After this I am declaring the vertex and pixel shaders.
Then in my draw call I am performing the following:
float bgColor[4] = {0.0f, 0.0f,0.0f, 1.0f };
d3d11DevCon->ClearRenderTargetView(renderTargetViewMap, bgColor);
float bgColor2[4] = { 0.0f, 1.0f, 0.0f, 1.0f };
////////////////////////////////////////////////Buffer 1///////////////////////////////////////////////////////////////
//Set the vertex buffer
UINT stride = sizeof(Vertex);
UINT offset = 0;
///////////////////////////////////////////////////////////Buffer 2//////////////////////////////////////////////////////////////////
d3d11DevCon->IASetIndexBuffer(d2dIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
d3d11DevCon->IASetVertexBuffers(0, 1, &triangleVertBuffer, &stride, &offset);
////Draw the triangle
d3d11DevCon->DrawIndexed(6, 0, 0);
I assume that since I have set my render target view to renderTargetViewMap, my draw call will render to the texture only.
Now I am rendering to my back buffer:
////////////////////////////////////////////
d3d11DevCon->OMSetRenderTargets(1, &renderTargetView, NULL);
d3d11DevCon->PSSetShaderResources(0, 1, &shaderResourceViewMap);
//d3d11DevCon->ClearRenderTargetView(renderTargetView, bgColor2);
d3d11DevCon->IASetIndexBuffer(d2dIndexBuffer2, DXGI_FORMAT_R32_UINT, 0);
d3d11DevCon->IASetVertexBuffers(0, 1, &triangleVertBuffer2, &stride, &offset);
////Draw the triangle
d3d11DevCon->DrawIndexed(6, 0, 0);
//Present the backbuffer to the screen
SwapChain->Present(0, 0);
So, this is how my rendering happens.
Issue faced:
In my shader file:
VS_OUTPUT VS(float4 inPos : POSITION, float4 inColor : COLOR)
{
VS_OUTPUT output;
output.Pos = inPos;
output.Color = inColor;
return output;
}
float4 PS(VS_OUTPUT input) : SV_TARGET
{
float2 temp;
temp = input.Pos;
float4 diffuse = ObjTexture.Sample(ObjSamplerState,0.5+0.5*temp);
return input.Color + diffuse;
}
Here the diffuse comes out equal to my bgColor, which I set when rendering to the texture:
float bgColor[4] = {0.0f, 0.0f,0.0f, 1.0f };
d3d11DevCon->ClearRenderTargetView(renderTargetViewMap, bgColor);
I have also drawn a rectangle on it, but I am not able to access those pixels.
How can I access the pixels of the rectangle that I have drawn when rendering to the texture?
Issue image and desired result: (screenshots omitted).
Shader File: Effect.fx
struct VS_OUTPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
Texture2D ObjTexture;
SamplerState ObjSamplerState;
VS_OUTPUT VS(float4 inPos : POSITION, float4 inColor : COLOR)
{
VS_OUTPUT output;
output.Pos = inPos;
output.Color = inColor;
return output;
}
float4 PS(VS_OUTPUT input) : SV_TARGET
{
float2 temp;
temp = input.Pos;
float4 diffuse = ObjTexture.Sample(ObjSamplerState,0.5+0.5*temp);
return input.Color + diffuse;
}
Edit 1:
With my latest code change I am able to blend rectangle 2 with rectangle 1, but the issue I am facing is that when I blend, the major part of my rectangle 2 changes to yellow (red + green); only on the edges am I able to see the actual green color.
Modified code:
main.cpp
//Include and link appropriate libraries and headers//
#pragma comment(lib, "d3d11.lib")
#pragma comment(lib, "d3dx11.lib")
#pragma comment(lib, "d3dx10.lib")
#include <windows.h>
#include <d3d11.h>
#include <d3dx11.h>
#include <d3dx10.h>
#include <xnamath.h>
//Global Declarations - Interfaces//
IDXGISwapChain* SwapChain;
ID3D11Device* d3d11Device;
ID3D11DeviceContext* d3d11DevCon;
ID3D11RenderTargetView* renderTargetView;
ID3D11Buffer* triangleVertBuffer;
ID3D11Buffer* triangleVertBuffer2;
ID3D11VertexShader* VS;
ID3D11PixelShader* PS;
ID3D10Blob* VS_Buffer;
ID3D10Blob* PS_Buffer;
ID3D11InputLayout* vertLayout;
XMMATRIX mapView;
XMMATRIX mapProjection;
XMVECTOR DefaultForward = XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);
//Global Declarations - Others//
LPCTSTR WndClassName = L"firstwindow";
HWND hwnd = NULL;
HRESULT hr;
const int Width = 800;
const int Height = 600;
bool InitializeDirect3d11App(HINSTANCE hInstance)
{
//Describe our Buffer
DXGI_MODE_DESC bufferDesc;
ZeroMemory(&bufferDesc, sizeof(DXGI_MODE_DESC));
bufferDesc.Width = Width;
bufferDesc.Height = Height;
bufferDesc.RefreshRate.Numerator = 60;
bufferDesc.RefreshRate.Denominator = 1;
bufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
bufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
bufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
//Describe our SwapChain
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
swapChainDesc.BufferDesc = bufferDesc;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.BufferCount = 1;
swapChainDesc.OutputWindow = hwnd;
swapChainDesc.Windowed = TRUE;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
//Create our SwapChain
hr = D3D11CreateDeviceAndSwapChain(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, NULL, NULL, NULL,
D3D11_SDK_VERSION, &swapChainDesc, &SwapChain, &d3d11Device, NULL, &d3d11DevCon);
//Create our BackBuffer
ID3D11Texture2D* BackBuffer;
hr = SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&BackBuffer);
//Create our Render Target
hr = d3d11Device->CreateRenderTargetView(BackBuffer, NULL, &renderTargetView);
BackBuffer->Release();
////////////////////////////////////////////////////////////////////////EXPERIMENT AREA//////////////////////////////////////////////////////////////////////////////////////
ZeroMemory(&textureDesc, sizeof(textureDesc));
// Setup the texture description.
// We will have our map be a square
// We will need to have this texture bound as a render target AND a shader resource
textureDesc.Width = Width/ 3.9729999999999999999999999999999;
textureDesc.Height = Height/3.9729999999999999999999999999999;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
textureDesc.SampleDesc.Count = 1;
textureDesc.SampleDesc.Quality = 0;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
textureDesc.CPUAccessFlags = 0;
textureDesc.MiscFlags = 0;
d3d11Device->CreateTexture2D(&textureDesc, NULL, &renderTargetTextureMap);
// Setup the description of the render target view.
renderTargetViewDesc.Format = textureDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
renderTargetViewDesc.Texture2D.MipSlice = 0;
// Create the render target view.
d3d11Device->CreateRenderTargetView(renderTargetTextureMap, &renderTargetViewDesc, &renderTargetViewMap);
/////////////////////// Map's Shader Resource View
// Setup the description of the shader resource view.
shaderResourceViewDesc.Format = textureDesc.Format;
shaderResourceViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
shaderResourceViewDesc.Texture2D.MipLevels = 1;
// Create the shader resource view.
d3d11Device->CreateShaderResourceView(renderTargetTextureMap, &shaderResourceViewDesc, &shaderResourceViewMap);
d3d11DevCon->OMSetRenderTargets(1, &renderTargetViewMap, NULL);
d3d11DevCon->PSSetShaderResources(0, 1, &shaderResourceViewMap);
return true;
}
void CleanUp()
{
//Release the COM Objects we created
SwapChain->Release();
d3d11Device->Release();
d3d11DevCon->Release();
renderTargetView->Release();
triangleVertBuffer->Release();
VS->Release();
PS->Release();
VS_Buffer->Release();
PS_Buffer->Release();
vertLayout->Release();
}
bool InitScene()
{
//Compile Shaders from shader file
hr = D3DX11CompileFromFile(L"Effect.fx", 0, 0, "VS", "vs_5_0", 0, 0, 0, &VS_Buffer, 0, 0);
hr = D3DX11CompileFromFile(L"Effect.fx", 0, 0, "PS", "ps_5_0", 0, 0, 0, &PS_Buffer, 0, 0);
//Create the Shader Objects
hr = d3d11Device->CreateVertexShader(VS_Buffer->GetBufferPointer(), VS_Buffer->GetBufferSize(), NULL, &VS);
hr = d3d11Device->CreatePixelShader(PS_Buffer->GetBufferPointer(), PS_Buffer->GetBufferSize(), NULL, &PS);
//Set Vertex and Pixel Shaders
d3d11DevCon->VSSetShader(VS, 0, 0);
d3d11DevCon->PSSetShader(PS, 0, 0);
//Create the Input Layout
hr = d3d11Device->CreateInputLayout(layout, numElements, VS_Buffer->GetBufferPointer(),
VS_Buffer->GetBufferSize(), &vertLayout);
//Set the Input Layout
d3d11DevCon->IASetInputLayout(vertLayout);
//Set Primitive Topology
d3d11DevCon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
//Create the Viewport
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
viewport.Width = 800;
viewport.Height = 600;
//Set the Viewport
d3d11DevCon->RSSetViewports(1, &viewport);
////////////////***********************First Texture Vertex Buffer *******************************/////////////////////////////
//Create the vertex buffer
Vertex v[] =
{
Vertex(-0.5f, -0.5f, 0.0f, 1.0f,0.0f,0.0f, 1.0f),
Vertex(-0.5f, 0.5f, 0.0f, 1.0f,0.0f,0.0f, 1.0f),
Vertex(0.5f, 0.5f, 0.0f, 1.0f, 0.0f,0.0f, 1.0f),
Vertex(0.5f, -0.5f, 0.0f, 1.0f,0.0f, 0.0f, 1.0f),
};
DWORD indices[] = {
// Front Face
0, 1, 3,
1, 2, 3,
};
D3D11_BUFFER_DESC indexBufferDesc;
ZeroMemory(&indexBufferDesc, sizeof(indexBufferDesc));
indexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc.ByteWidth = sizeof(DWORD) * 2 * 3;
indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc.CPUAccessFlags = 0;
indexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iinitData;
iinitData.pSysMem = indices;
d3d11Device->CreateBuffer(&indexBufferDesc, &iinitData, &d2dIndexBuffer);
D3D11_BUFFER_DESC vertexBufferDesc;
ZeroMemory(&vertexBufferDesc, sizeof(vertexBufferDesc));
vertexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc.ByteWidth = sizeof(Vertex) * 4;
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexBufferDesc.CPUAccessFlags = 0;
vertexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA vertexBufferData;
ZeroMemory(&vertexBufferData, sizeof(vertexBufferData));
vertexBufferData.pSysMem = v;
hr = d3d11Device->CreateBuffer(&vertexBufferDesc, &vertexBufferData, &triangleVertBuffer);
////////////////////////////////////////////////////// Second Vertex.
Vertex v2[] = {
// positions // colors // texture coords
Vertex(1.0f, 1.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // top right
Vertex(1.0f,0.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // bottom right
Vertex(0.0f,0.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // bottom left
Vertex(0.0f, 1.0, 0.0f,0.0f, 1.0f,0.0f,1.0f) // top left
};
DWORD indices2[] = {
// Front Face
0, 1, 2,
0, 2, 3,
};
D3D11_BUFFER_DESC indexBufferDesc2;
ZeroMemory(&indexBufferDesc2, sizeof(indexBufferDesc2));
indexBufferDesc2.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc2.ByteWidth = sizeof(DWORD) * 2 * 3;
indexBufferDesc2.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc2.CPUAccessFlags = 0;
indexBufferDesc2.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iinitData2;
iinitData2.pSysMem = indices2;
d3d11Device->CreateBuffer(&indexBufferDesc2, &iinitData2, &d2dIndexBuffer2);
D3D11_BUFFER_DESC vertexBufferDesc2;
ZeroMemory(&vertexBufferDesc2, sizeof(vertexBufferDesc2));
vertexBufferDesc2.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc2.ByteWidth = sizeof(Vertex) * 4;
vertexBufferDesc2.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexBufferDesc2.CPUAccessFlags = 0;
vertexBufferDesc2.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA vertexBufferData2;
ZeroMemory(&vertexBufferData2, sizeof(vertexBufferData2));
vertexBufferData2.pSysMem = v2;
hr = d3d11Device->CreateBuffer(&vertexBufferDesc2, &vertexBufferData2, &triangleVertBuffer2);
UINT stride = sizeof(Vertex);
UINT offset = 0;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
return true;
}
Shader File:
struct VS_OUTPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
Texture2D ObjTexture;
SamplerState ObjSamplerState;
VS_OUTPUT VS(float4 inPos : POSITION, float4 inColor : COLOR)
{
VS_OUTPUT output;
output.Pos = inPos;
output.Color = inColor;
return output;
}
float4 PS(VS_OUTPUT input) : SV_TARGET
{
float2 temp;
temp = input.Pos;
float4 diffuse = ObjTexture.Sample(ObjSamplerState,0.5*temp);
return input.Color + diffuse ;
}
Edit 3:
My vertex Structure:
struct Vertex //Overloaded Vertex Structure
{
Vertex() {}
Vertex(float x, float y, float z,
float cr, float cg, float cb, float ca)
: pos(x, y, z), color(cr, cg, cb, ca) {}
XMFLOAT3 pos;
XMFLOAT4 color;
};
Input description:
D3D11_INPUT_ELEMENT_DESC layout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
I think the issue is that when I pass my texture and read it into diffuse in the pixel shader, the mapping of my texture onto the back buffer is not the same, and hence I am only finding the red color from the 2nd scan line, and hence a resultant of zero is produced?
Edit 3:
My initialization code:
struct Vertex //Overloaded Vertex Structure
{
Vertex() {}
Vertex(float x, float y, float z,
float cr, float cg, float cb, float ca)
: pos(x, y, z), color(cr, cg, cb, ca) {}
XMFLOAT3 pos;
XMFLOAT4 color;
};
ID3D11Texture2D* renderTargetTextureMap;
ID3D11RenderTargetView* renderTargetViewMap;
ID3D11ShaderResourceView* shaderResourceViewMap;
ID3D11SamplerState* CubesTexSamplerState;
ID3D11Buffer *d2dIndexBuffer;
ID3D11Buffer *d2dIndexBuffer2;
D3D11_TEXTURE2D_DESC textureDesc;
D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc;
D3D11_SHADER_RESOURCE_VIEW_DESC shaderResourceViewDesc;
D3D11_INPUT_ELEMENT_DESC layout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
UINT numElements = ARRAYSIZE(layout);
bool InitializeDirect3d11App(HINSTANCE hInstance)
{
//Describe our Buffer
DXGI_MODE_DESC bufferDesc;
ZeroMemory(&bufferDesc, sizeof(DXGI_MODE_DESC));
bufferDesc.Width = Width;
bufferDesc.Height = Height;
bufferDesc.RefreshRate.Numerator = 60;
bufferDesc.RefreshRate.Denominator = 1;
bufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
bufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
bufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
//Describe our SwapChain
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
swapChainDesc.BufferDesc = bufferDesc;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.BufferCount = 1;
swapChainDesc.OutputWindow = hwnd;
swapChainDesc.Windowed = TRUE;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
//Create our SwapChain
hr = D3D11CreateDeviceAndSwapChain(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, NULL, NULL, NULL,
D3D11_SDK_VERSION, &swapChainDesc, &SwapChain, &d3d11Device, NULL, &d3d11DevCon);
//Create our BackBuffer
ID3D11Texture2D* BackBuffer;
hr = SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&BackBuffer);
//Create our Render Target
hr = d3d11Device->CreateRenderTargetView(BackBuffer, NULL, &renderTargetView);
BackBuffer->Release();
////////////////////////////////////////////////////////////////////////EXPERIMENT AREA//////////////////////////////////////////////////////////////////////////////////////
ZeroMemory(&textureDesc, sizeof(textureDesc));
// Setup the texture description.
// We will have our map be a square
// We will need to have this texture bound as a render target AND a shader resource
textureDesc.Width = Width/2;
textureDesc.Height = Height/2;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
textureDesc.SampleDesc.Count = 1;
textureDesc.SampleDesc.Quality = 0;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
textureDesc.CPUAccessFlags = 0;
textureDesc.MiscFlags = 0;
d3d11Device->CreateTexture2D(&textureDesc, NULL, &renderTargetTextureMap);
// Setup the description of the render target view.
renderTargetViewDesc.Format = textureDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
renderTargetViewDesc.Texture2D.MipSlice = 0;
// Create the render target view.
d3d11Device->CreateRenderTargetView(renderTargetTextureMap, &renderTargetViewDesc, &renderTargetView);
/////////////////////// Map's Shader Resource View
// Setup the description of the shader resource view.
shaderResourceViewDesc.Format = textureDesc.Format;
shaderResourceViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
shaderResourceViewDesc.Texture2D.MipLevels = 1;
// Create the shader resource view.
d3d11Device->CreateShaderResourceView(renderTargetTextureMap, &shaderResourceViewDesc, &shaderResourceViewMap);
//d3d11DevCon->OMSetRenderTargets(1, &renderTargetViewMap, NULL);
//d3d11DevCon->PSSetShaderResources(0, 1, &shaderResourceViewMap);
return true;
}
void CleanUp()
{
//Release the COM Objects we created
SwapChain->Release();
d3d11Device->Release();
d3d11DevCon->Release();
renderTargetView->Release();
triangleVertBuffer->Release();
VS->Release();
PS->Release();
VS_Buffer->Release();
PS_Buffer->Release();
vertLayout->Release();
}
bool InitScene()
{
//Compile Shaders from shader file
hr = D3DX11CompileFromFile(L"Effect.fx", 0, 0, "VS", "vs_5_0", 0, 0, 0, &VS_Buffer, 0, 0);
hr = D3DX11CompileFromFile(L"Effect.fx", 0, 0, "PS", "ps_5_0", 0, 0, 0, &PS_Buffer, 0, 0);
//Create the Shader Objects
hr = d3d11Device->CreateVertexShader(VS_Buffer->GetBufferPointer(), VS_Buffer->GetBufferSize(), NULL, &VS);
hr = d3d11Device->CreatePixelShader(PS_Buffer->GetBufferPointer(), PS_Buffer->GetBufferSize(), NULL, &PS);
//Set Vertex and Pixel Shaders
d3d11DevCon->VSSetShader(VS, 0, 0);
d3d11DevCon->PSSetShader(PS, 0, 0);
//Create the Input Layout
hr = d3d11Device->CreateInputLayout(layout, numElements, VS_Buffer->GetBufferPointer(),
VS_Buffer->GetBufferSize(), &vertLayout);
//Set the Input Layout
d3d11DevCon->IASetInputLayout(vertLayout);
//Set Primitive Topology
d3d11DevCon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
//Create the Viewport
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
viewport.Width = 800;
viewport.Height = 600;
//Set the Viewport
d3d11DevCon->RSSetViewports(1, &viewport);
////////////////***********************First Texture Vertex Buffer *******************************/////////////////////////////
//Create the vertex buffer
Vertex v[] =
{
Vertex(-0.35f, -0.35f, 0.0f, 1.0f,0.0f,0.0f, 1.0f),
Vertex(-0.35f, 0.35f, 0.0f, 1.0f,0.0f,0.0f, 1.0f),
Vertex(0.35f, 0.35f, 0.0f, 1.0f, 0.0f,0.0f, 1.0f),
Vertex(0.35f, -0.35f, 0.0f, 1.0f,0.0f, 0.0f, 1.0f),
};
DWORD indices[] = {
// Front Face
0, 1, 3,
1, 2, 3,
};
D3D11_BUFFER_DESC indexBufferDesc;
ZeroMemory(&indexBufferDesc, sizeof(indexBufferDesc));
indexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc.ByteWidth = sizeof(DWORD) * 2 * 3;
indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc.CPUAccessFlags = 0;
indexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iinitData;
iinitData.pSysMem = indices;
d3d11Device->CreateBuffer(&indexBufferDesc, &iinitData, &d2dIndexBuffer);
D3D11_BUFFER_DESC vertexBufferDesc;
ZeroMemory(&vertexBufferDesc, sizeof(vertexBufferDesc));
vertexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc.ByteWidth = sizeof(Vertex) * 4;
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexBufferDesc.CPUAccessFlags = 0;
vertexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA vertexBufferData;
ZeroMemory(&vertexBufferData, sizeof(vertexBufferData));
vertexBufferData.pSysMem = v;
hr = d3d11Device->CreateBuffer(&vertexBufferDesc, &vertexBufferData, &triangleVertBuffer);
////////////////////////////////////////////////////// Second Vertex.
Vertex v2[] = {
// positions // colors // texture coords
Vertex(1.0f, 1.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // top right
Vertex(1.0f,0.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // bottom right
Vertex(0.0f,0.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // bottom left
Vertex(0.0f, 1.0, 0.0f,0.0f, 1.0f,0.0f,1.0f) // top left
};
DWORD indices2[] = {
// Front Face
0, 1, 2,
0, 2, 3,
};
D3D11_BUFFER_DESC indexBufferDesc2;
ZeroMemory(&indexBufferDesc2, sizeof(indexBufferDesc2));
indexBufferDesc2.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc2.ByteWidth = sizeof(DWORD) * 2 * 3;
indexBufferDesc2.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc2.CPUAccessFlags = 0;
indexBufferDesc2.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iinitData2;
iinitData2.pSysMem = indices2;
d3d11Device->CreateBuffer(&indexBufferDesc2, &iinitData2, &d2dIndexBuffer2);
D3D11_BUFFER_DESC vertexBufferDesc2;
ZeroMemory(&vertexBufferDesc2, sizeof(vertexBufferDesc2));
vertexBufferDesc2.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc2.ByteWidth = sizeof(Vertex) * 4;
vertexBufferDesc2.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexBufferDesc2.CPUAccessFlags = 0;
vertexBufferDesc2.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA vertexBufferData2;
ZeroMemory(&vertexBufferData2, sizeof(vertexBufferData2));
vertexBufferData2.pSysMem = v2;
hr = d3d11Device->CreateBuffer(&vertexBufferDesc2, &vertexBufferData2, &triangleVertBuffer2);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
return true;
}
Edit 4:
Added draw scene code:
void DrawScene()
{
//Set the vertex buffer
UINT stride = sizeof(Vertex);
UINT offset = 0;
///////////////////////////////////////////////////////////Buffer 2//////////////////////////////////////////////////////////////////
d3d11DevCon->IASetIndexBuffer(d2dIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
d3d11DevCon->IASetVertexBuffers(0, 1, &triangleVertBuffer, &stride, &offset);
////Draw the triangle
d3d11DevCon->DrawIndexed(6, 0, 0);
d3d11DevCon->OMSetRenderTargets(1, &renderTargetView, NULL);
////////////////////////////////////////////
d3d11DevCon->PSSetShaderResources(0, 1, &shaderResourceViewMap);
d3d11DevCon->IASetIndexBuffer(d2dIndexBuffer2, DXGI_FORMAT_R32_UINT, 0);
d3d11DevCon->IASetVertexBuffers(0, 1, &triangleVertBuffer2, &stride, &offset);
////Draw the triangle
d3d11DevCon->DrawIndexed(6, 0, 0);
//Present the backbuffer to the screen
SwapChain->Present(0, 0);
}
In order to calculate correct texture coordinates from the vertex Position, you need to:
1. Create the render target texture with Width and Height (not the halves):
textureDesc.Width = Width; // /2; // Do not use half width
textureDesc.Height = Height; // /2; // Do not use half height
2. Scale the position by float2(1 / Width, 1 / Height) in your pixel shader, like this:
float2 tex = input.Pos * float2(1.0f / 800.0f, 1.0f / 600.0f);
float4 diffuse = ObjTexture.Sample(ObjSamplerState, tex);
return input.Color + diffuse;
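For reference, this is how those two changes might look combined in the pixel shader (a sketch only; it assumes the 800x600 Width/Height constants used above for both the back buffer and the render-target texture). SV_POSITION arrives in the pixel shader as viewport pixel coordinates, so dividing by the render-target size yields texture coordinates in the [0, 1] range:
// Sketch of the corrected pixel shader (assumes an 800x600 render target).
float4 PS(VS_OUTPUT input) : SV_TARGET
{
// SV_POSITION holds pixel coordinates here; divide by the target size to
// get [0, 1] texture coordinates that line up with the first pass.
float2 tex = input.Pos.xy * float2(1.0f / 800.0f, 1.0f / 600.0f);
float4 diffuse = ObjTexture.Sample(ObjSamplerState, tex);
return input.Color + diffuse;
}
With the full-size render target, the same pixel coordinates address the same texel in both passes, which is what lets the second draw read back the rectangle rendered in the first pass.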

How to increase haar detector's window size in OpenCV

I am using the code available on this website: http://nashruddin.com/OpenCV_Face_Detection to do face detection.
I would like to increase the size of the detected face region. I am not sure how to do it and need some help with it.
The code I am using is this:
//
#include "stdafx.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
CvHaarClassifierCascade *cascade;
CvMemStorage *storage;
void detectFaces( IplImage *img );
int main( int argc, char** argv )
{
CvCapture *capture;
IplImage *frame;
int key = 0;
char *filename = "C:/OpenCV2.1/data/haarcascades/haarcascade_frontalface_alt.xml";
cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
storage = cvCreateMemStorage( 0 );
capture = cvCaptureFromCAM( 0 );
assert( cascade && storage && capture );
cvNamedWindow( "video", 1 );
while( key != 'q' ) {
frame = cvQueryFrame( capture );
if( !frame ) {
fprintf( stderr, "Cannot query frame!\n" );
break;
}
cvFlip( frame, frame, -1 );
frame->origin = 0;
detectFaces( frame );
key = cvWaitKey( 10 );
}
cvReleaseCapture( &capture );
cvDestroyWindow( "video" );
cvReleaseHaarClassifierCascade( &cascade );
cvReleaseMemStorage( &storage );
return 0;
}
void detectFaces( IplImage *img )
{
int i;
CvSeq *faces = cvHaarDetectObjects(
img,
cascade,
storage,
1.1,
3,
0 /*CV_HAAR_DO_CANNY_PRUNNING*/,
cvSize( 40, 40 ) );
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
cvRectangle( img,
cvPoint( r->x, r->y ),
cvPoint( r->x + r->width, r->y + r->height ),
CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
cvShowImage( "video", img );
}
This increases the size of the rectangle around the face. If you meant increasing the haar detector's window size, please update your question.
int padding_width = 30; // pixels
int padding_height = 30; // pixels
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
// Yes yes, all of this could be written much more compactly.
// It was written like this for clarity.
int topleft_x = r->x - (padding_width / 2);
int topleft_y = r->y - (padding_height / 2);
if (topleft_x < 0)
topleft_x = 0;
if (topleft_y < 0)
topleft_y = 0;
int bottomright_x = r->x + r->width + (padding_width / 2);
int bottomright_y = r->y + r->height + (padding_height / 2);
if (bottomright_x >= img->width)
bottomright_x = img->width - 1;
if (bottomright_y >= img->height)
bottomright_y = img->height - 1;
cvRectangle( img,
cvPoint(topleft_x, topleft_y),
cvPoint(bottomright_x, bottomright_y),
CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}

How to get a "Glow" shader effect in OpenGL ES 2.0?

I'm writing a 3D app for iOS. I'm new to OpenGL ES 2.0, so I'm still getting my head around writing basic shaders.
I really need to implement a "Glow" effect on some of my models, based on the texturing.
Here's a sample: (screenshot of the desired glow effect omitted).
I'm looking for code examples for OpenGL ES 2.0. Most code I find on the internet is either for desktop OpenGL or D3D.
Any ideas?
First of all, there are tons of algorithms and techniques to generate a glow effect; I just want to present one possibility.
Create a material that is luminescent.
For this I use a modified Blinn-Phong light model, where the direction to the light source is always the inverse of the fragment's normal vector.
varying vec3 vertPos;
varying vec3 vertNV;
varying vec3 vertCol;
uniform float u_glow;
void main()
{
vec3 color = vertCol;
float shininess = 10.0;
vec3 normalV = normalize( vertNV );
vec3 eyeV = normalize( -vertPos );
vec3 halfV = normalize( eyeV + normalV );
float NdotH = max( 0.0, dot( normalV, halfV ) );
float glowFac = ( shininess + 2.0 ) * pow( NdotH, shininess ) / ( 2.0 * 3.14159265 );
gl_FragColor = vec4( u_glow * (0.1 + color.rgb * glowFac * 0.5), 1.0 );
}
In a second step, a Gaussian blur is performed on the output. The scene is rendered to a frame buffer with a texture bound to its color plane, and a screen-space pass then uses that texture as input to blur the result.
For performance reasons, the blur is first performed along the X axis of the viewport and in a further step along the Y axis.
varying vec2 vertPos;
uniform sampler2D u_textureCol;
uniform vec2 u_textureSize;
uniform float u_sigma;
uniform int u_width;
float CalcGauss( float x, float sigma )
{
float coeff = 1.0 / (2.0 * 3.14157 * sigma);
float expon = -(x*x) / (2.0 * sigma);
return (coeff*exp(expon));
}
void main()
{
vec2 texC = vertPos.st * 0.5 + 0.5;
vec4 texCol = texture2D( u_textureCol, texC );
vec4 gaussCol = vec4( texCol.rgb, 1.0 );
vec2 step = 1.0 / u_textureSize;
for ( int i = 1; i <= u_width; ++ i )
{
vec2 actStep = vec2( float(i) * step.x, 0.0 ); // this is for the X-axis
// vec2 actStep = vec2( 0.0, float(i) * step.y ); this would be for the Y-axis
float weight = CalcGauss( float(i) / float(u_width), u_sigma );
texCol = texture2D( u_textureCol, texC + actStep );
gaussCol += vec4( texCol.rgb * weight, weight );
texCol = texture2D( u_textureCol, texC - actStep );
gaussCol += vec4( texCol.rgb * weight, weight );
}
gaussCol.rgb /= gaussCol.w;
gl_FragColor = vec4( gaussCol.rgb, 1.0 );
}
For the implementation of a blur algorithm, see also the answers to these questions:
OpenGL es 2.0 Gaussian blur on triangle
What kind of blurs can be implemented in pixel shaders?
See the following similar WebGL example, which puts it all together:
var readInput = true;
function changeEventHandler(event){
readInput = true;
}
(function loadscene() {
var resize, gl, progDraw, progBlurX, progPost, vp_size, blurFB;
var bufCube = {};
var bufQuad = {};
var shininess = 10.0;
var glow = 10.0;
var sigma = 0.8;
function render(delteMS){
//if ( readInput ) {
readInput = false;
var sliderScale = 100;
shininess = document.getElementById( "shine" ).value;
glow = document.getElementById( "glow" ).value / sliderScale;
sigma = document.getElementById( "sigma" ).value / sliderScale;
//}
Camera.create();
Camera.vp = vp_size;
gl.enable( gl.DEPTH_TEST );
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
// set up framebuffer
gl.bindFramebuffer( gl.FRAMEBUFFER, blurFB[0] );
gl.viewport( 0, 0, blurFB[0].width, blurFB[0].height );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
// set up draw shader
ShaderProgram.Use( progDraw.prog );
ShaderProgram.SetUniformM44( progDraw.prog, "u_projectionMat44", Camera.Perspective() );
ShaderProgram.SetUniformM44( progDraw.prog, "u_viewMat44", Camera.LookAt() );
var modelMat = IdentityMat44()
modelMat = RotateAxis( modelMat, CalcAng( delteMS, 13.0 ), 0 );
modelMat = RotateAxis( modelMat, CalcAng( delteMS, 17.0 ), 1 );
ShaderProgram.SetUniformM44( progDraw.prog, "u_modelMat44", modelMat );
ShaderProgram.SetUniformF1( progDraw.prog, "u_shininess", shininess );
ShaderProgram.SetUniformF1( progDraw.prog, "u_glow", glow );
// draw scene
VertexBuffer.Draw( bufCube );
// set blur-X framebuffer and bind frambuffer texture
gl.bindFramebuffer( gl.FRAMEBUFFER, blurFB[1] );
gl.viewport( 0, 0, blurFB[1].width, blurFB[1].height );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
var texUnit = 1;
gl.activeTexture( gl.TEXTURE0 + texUnit );
gl.bindTexture( gl.TEXTURE_2D, blurFB[0].color0_texture );
// set up blur-X shader
ShaderProgram.Use( progBlurX.prog );
ShaderProgram.SetUniformI1( progBlurX.prog , "u_texture", texUnit )
ShaderProgram.SetUniformF2( progBlurX.prog , "u_textureSize", vp_size );
ShaderProgram.SetUniformF1( progBlurX.prog , "u_sigma", sigma )
// draw full screen space
gl.enableVertexAttribArray( progBlurX.inPos );
gl.bindBuffer( gl.ARRAY_BUFFER, bufQuad.pos );
gl.vertexAttribPointer( progBlurX.inPos, 2, gl.FLOAT, false, 0, 0 );
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufQuad.inx );
gl.drawElements( gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0 );
gl.disableVertexAttribArray( progBlurX.inPos );
// reset framebuffer and bind frambuffer texture
gl.bindFramebuffer( gl.FRAMEBUFFER, null );
gl.viewport( 0, 0, vp_size[0], vp_size[1] );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
texUnit = 2;
gl.activeTexture( gl.TEXTURE0 + texUnit );
gl.bindTexture( gl.TEXTURE_2D, blurFB[1].color0_texture );
// set up post process shader
ShaderProgram.Use( progPost.prog );
ShaderProgram.SetUniformI1( progPost.prog, "u_texture", texUnit )
ShaderProgram.SetUniformF2( progPost.prog, "u_textureSize", vp_size );
ShaderProgram.SetUniformF1( progPost.prog, "u_sigma", sigma );
// draw full screen space
gl.enableVertexAttribArray( progPost.inPos );
gl.bindBuffer( gl.ARRAY_BUFFER, bufQuad.pos );
gl.vertexAttribPointer( progPost.inPos, 2, gl.FLOAT, false, 0, 0 );
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufQuad.inx );
gl.drawElements( gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0 );
gl.disableVertexAttribArray( progPost.inPos );
requestAnimationFrame(render);
}
function resize() {
//vp_size = [gl.drawingBufferWidth, gl.drawingBufferHeight];
vp_size = [window.innerWidth, window.innerHeight]
canvas.width = vp_size[0];
canvas.height = vp_size[1];
var fbsize = Math.max(vp_size[0], vp_size[1])-1;
fbsize = 1 << 31 - Math.clz32(fbsize); // nearest power of 2
fbsize = fbsize * 2
blurFB = [];
for ( var i = 0; i < 2; ++ i ) {
fb = gl.createFramebuffer();
fb.width = fbsize;
fb.height = fbsize;
gl.bindFramebuffer( gl.FRAMEBUFFER, fb );
fb.color0_texture = gl.createTexture();
gl.bindTexture( gl.TEXTURE_2D, fb.color0_texture );
gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST );
gl.texParameteri( gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST );
gl.texImage2D( gl.TEXTURE_2D, 0, gl.RGBA, fb.width, fb.height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null );
fb.renderbuffer = gl.createRenderbuffer();
gl.bindRenderbuffer( gl.RENDERBUFFER, fb.renderbuffer );
gl.renderbufferStorage( gl.RENDERBUFFER, gl.DEPTH_COMPONENT16, fb.width, fb.height );
gl.framebufferTexture2D( gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, fb.color0_texture, 0 );
gl.framebufferRenderbuffer( gl.FRAMEBUFFER, gl.DEPTH_ATTACHMENT, gl.RENDERBUFFER, fb.renderbuffer );
gl.bindTexture( gl.TEXTURE_2D, null );
gl.bindRenderbuffer( gl.RENDERBUFFER, null );
gl.bindFramebuffer( gl.FRAMEBUFFER, null );
blurFB.push( fb );
}
}
function initScene() {
canvas = document.getElementById( "canvas");
gl = canvas.getContext( "experimental-webgl" );
if ( !gl )
return null;
progDraw = {}
progDraw.prog = ShaderProgram.Create(
[ { source : "draw-shader-vs", stage : gl.VERTEX_SHADER },
{ source : "draw-shader-fs", stage : gl.FRAGMENT_SHADER }
] );
if ( !progDraw.prog )
return null;
progDraw.inPos = gl.getAttribLocation( progDraw.prog, "inPos" );
progDraw.inNV = gl.getAttribLocation( progDraw.prog, "inNV" );
progDraw.inCol = gl.getAttribLocation( progDraw.prog, "inCol" );
progBlurX = {}
progBlurX.prog = ShaderProgram.Create(
[ { source : "post-shader-vs", stage : gl.VERTEX_SHADER },
{ source : "blurX-shader-fs", stage : gl.FRAGMENT_SHADER }
] );
progBlurX.inPos = gl.getAttribLocation( progBlurX.prog, "inPos" );
if ( !progBlurX.prog )
return;
progPost = {}
progPost.prog = ShaderProgram.Create(
[ { source : "post-shader-vs", stage : gl.VERTEX_SHADER },
{ source : "blurY-shader-fs", stage : gl.FRAGMENT_SHADER }
] );
progPost.inPos = gl.getAttribLocation( progPost.prog, "inPos" );
if ( !progPost.prog )
return;
// create cube
var cubePos = [
-1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0,
-1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0 ];
var cubeCol = [ 1.0, 0.0, 0.0, 1.0, 0.5, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0 ];
var cubeHlpInx = [ 0, 1, 2, 3, 1, 5, 6, 2, 5, 4, 7, 6, 4, 0, 3, 7, 3, 2, 6, 7, 1, 0, 4, 5 ];
var cubePosData = [];
for ( var i = 0; i < cubeHlpInx.length; ++ i ) {
cubePosData.push( cubePos[cubeHlpInx[i]*3], cubePos[cubeHlpInx[i]*3+1], cubePos[cubeHlpInx[i]*3+2] );
}
var cubeNVData = [];
for ( var i1 = 0; i1 < cubeHlpInx.length; i1 += 4 ) {
var nv = [0, 0, 0];
for ( i2 = 0; i2 < 4; ++ i2 ) {
var i = i1 + i2;
nv[0] += cubePosData[i*3]; nv[1] += cubePosData[i*3+1]; nv[2] += cubePosData[i*3+2];
}
for ( i2 = 0; i2 < 4; ++ i2 )
cubeNVData.push( nv[0], nv[1], nv[2] );
}
var cubeColData = [];
for ( var is = 0; is < 6; ++ is ) {
for ( var ip = 0; ip < 4; ++ ip ) {
cubeColData.push( cubeCol[is*3], cubeCol[is*3+1], cubeCol[is*3+2] );
}
}
var cubeInxData = [];
for ( var i = 0; i < cubeHlpInx.length; i += 4 ) {
cubeInxData.push( i, i+1, i+2, i, i+2, i+3 );
}
bufCube = VertexBuffer.Create(
[ { data : cubePosData, attrSize : 3, attrLoc : progDraw.inPos },
{ data : cubeNVData, attrSize : 3, attrLoc : progDraw.inNV },
{ data : cubeColData, attrSize : 3, attrLoc : progDraw.inCol } ],
cubeInxData );
bufQuad.pos = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, bufQuad.pos );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( [ -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0 ] ), gl.STATIC_DRAW );
bufQuad.inx = gl.createBuffer();
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufQuad.inx );
gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( [ 0, 1, 2, 0, 2, 3 ] ), gl.STATIC_DRAW );
window.onresize = resize;
resize();
requestAnimationFrame(render);
}
function Fract( val ) {
return val - Math.trunc( val );
}
function CalcAng( deltaTime, intervall ) {
return Fract( deltaTime / (1000*intervall) ) * 2.0 * Math.PI;
}
function CalcMove( deltaTime, intervall, range ) {
var pos = Fract( deltaTime / (1000*intervall) ) * 2.0
var pos = pos < 1.0 ? pos : (2.0-pos)
return range[0] + (range[1] - range[0]) * pos;
}
function EllipticalPosition( a, b, angRag ) {
var a_b = a * a - b * b
var ea = (a_b <= 0) ? 0 : Math.sqrt( a_b );
var eb = (a_b >= 0) ? 0 : Math.sqrt( -a_b );
return [ a * Math.sin( angRag ) - ea, b * Math.cos( angRag ) - eb, 0 ];
}
glArrayType = typeof Float32Array !="undefined" ? Float32Array : ( typeof WebGLFloatArray != "undefined" ? WebGLFloatArray : Array );
function IdentityMat44() {
var m = new glArrayType(16);
m[0] = 1; m[1] = 0; m[2] = 0; m[3] = 0;
m[4] = 0; m[5] = 1; m[6] = 0; m[7] = 0;
m[8] = 0; m[9] = 0; m[10] = 1; m[11] = 0;
m[12] = 0; m[13] = 0; m[14] = 0; m[15] = 1;
return m;
};
function RotateAxis(matA, angRad, axis) {
var aMap = [ [1, 2], [2, 0], [0, 1] ];
var a0 = aMap[axis][0], a1 = aMap[axis][1];
var sinAng = Math.sin(angRad), cosAng = Math.cos(angRad);
var matB = new glArrayType(16);
for ( var i = 0; i < 16; ++ i ) matB[i] = matA[i];
for ( var i = 0; i < 3; ++ i ) {
matB[a0*4+i] = matA[a0*4+i] * cosAng + matA[a1*4+i] * sinAng;
matB[a1*4+i] = matA[a0*4+i] * -sinAng + matA[a1*4+i] * cosAng;
}
return matB;
}
function Cross( a, b ) { return [ a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0], 0.0 ]; }
function Dot( a, b ) { return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]; }
function Normalize( v ) {
var len = Math.sqrt( v[0] * v[0] + v[1] * v[1] + v[2] * v[2] );
return [ v[0] / len, v[1] / len, v[2] / len ];
}
var Camera = {};
Camera.create = function() {
this.pos = [0, 3, 0.0];
this.target = [0, 0, 0];
this.up = [0, 0, 1];
this.fov_y = 90;
this.vp = [800, 600];
this.near = 0.5;
this.far = 100.0;
}
Camera.Perspective = function() {
var fn = this.far + this.near;
var f_n = this.far - this.near;
var r = this.vp[0] / this.vp[1];
var t = 1 / Math.tan( Math.PI * this.fov_y / 360 );
var m = IdentityMat44();
m[0] = t/r; m[1] = 0; m[2] = 0; m[3] = 0;
m[4] = 0; m[5] = t; m[6] = 0; m[7] = 0;
m[8] = 0; m[9] = 0; m[10] = -fn / f_n; m[11] = -1;
m[12] = 0; m[13] = 0; m[14] = -2 * this.far * this.near / f_n; m[15] = 0;
return m;
}
Camera.LookAt = function() {
var mz = Normalize( [ this.pos[0]-this.target[0], this.pos[1]-this.target[1], this.pos[2]-this.target[2] ] );
var mx = Normalize( Cross( this.up, mz ) );
var my = Normalize( Cross( mz, mx ) );
var tx = Dot( mx, this.pos );
var ty = Dot( my, this.pos );
var tz = Dot( [-mz[0], -mz[1], -mz[2]], this.pos );
var m = IdentityMat44();
m[0] = mx[0]; m[1] = my[0]; m[2] = mz[0]; m[3] = 0;
m[4] = mx[1]; m[5] = my[1]; m[6] = mz[1]; m[7] = 0;
m[8] = mx[2]; m[9] = my[2]; m[10] = mz[2]; m[11] = 0;
m[12] = tx; m[13] = ty; m[14] = tz; m[15] = 1;
return m;
}
var ShaderProgram = {};
ShaderProgram.Create = function( shaderList ) {
var shaderObjs = [];
for ( var i_sh = 0; i_sh < shaderList.length; ++ i_sh ) {
var shderObj = this.CompileShader( shaderList[i_sh].source, shaderList[i_sh].stage );
if ( shderObj == 0 )
return 0;
shaderObjs.push( shderObj );
}
var progObj = this.LinkProgram( shaderObjs )
if ( progObj != 0 ) {
progObj.attribIndex = {};
var noOfAttributes = gl.getProgramParameter( progObj, gl.ACTIVE_ATTRIBUTES );
for ( var i_n = 0; i_n < noOfAttributes; ++ i_n ) {
var name = gl.getActiveAttrib( progObj, i_n ).name;
progObj.attribIndex[name] = gl.getAttribLocation( progObj, name );
}
progObj.unifomLocation = {};
var noOfUniforms = gl.getProgramParameter( progObj, gl.ACTIVE_UNIFORMS );
for ( var i_n = 0; i_n < noOfUniforms; ++ i_n ) {
var name = gl.getActiveUniform( progObj, i_n ).name;
progObj.unifomLocation[name] = gl.getUniformLocation( progObj, name );
}
}
return progObj;
}
ShaderProgram.AttributeIndex = function( progObj, name ) { return progObj.attribIndex[name]; }
ShaderProgram.UniformLocation = function( progObj, name ) { return progObj.unifomLocation[name]; }
ShaderProgram.Use = function( progObj ) { gl.useProgram( progObj ); }
ShaderProgram.SetUniformI1 = function( progObj, name, val ) { if(progObj.unifomLocation[name]) gl.uniform1i( progObj.unifomLocation[name], val ); }
ShaderProgram.SetUniformF1 = function( progObj, name, val ) { if(progObj.unifomLocation[name]) gl.uniform1f( progObj.unifomLocation[name], val ); }
ShaderProgram.SetUniformF2 = function( progObj, name, arr ) { if(progObj.unifomLocation[name]) gl.uniform2fv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniformF3 = function( progObj, name, arr ) { if(progObj.unifomLocation[name]) gl.uniform3fv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniformF4 = function( progObj, name, arr ) { if(progObj.unifomLocation[name]) gl.uniform4fv( progObj.unifomLocation[name], arr ); }
ShaderProgram.SetUniformM33 = function( progObj, name, mat ) { if(progObj.unifomLocation[name]) gl.uniformMatrix3fv( progObj.unifomLocation[name], false, mat ); }
ShaderProgram.SetUniformM44 = function( progObj, name, mat ) { if(progObj.unifomLocation[name]) gl.uniformMatrix4fv( progObj.unifomLocation[name], false, mat ); }
ShaderProgram.CompileShader = function( source, shaderStage ) {
var shaderScript = document.getElementById(source);
if (shaderScript)
source = shaderScript.text;
var shaderObj = gl.createShader( shaderStage );
gl.shaderSource( shaderObj, source );
gl.compileShader( shaderObj );
var status = gl.getShaderParameter( shaderObj, gl.COMPILE_STATUS );
if ( !status ) alert(gl.getShaderInfoLog(shaderObj));
return status ? shaderObj : null;
}
ShaderProgram.LinkProgram = function( shaderObjs ) {
var prog = gl.createProgram();
for ( var i_sh = 0; i_sh < shaderObjs.length; ++ i_sh )
gl.attachShader( prog, shaderObjs[i_sh] );
gl.linkProgram( prog );
status = gl.getProgramParameter( prog, gl.LINK_STATUS );
if ( !status ) alert("Could not initialise shaders");
gl.useProgram( null );
return status ? prog : null;
}
var VertexBuffer = {};
VertexBuffer.Create = function( attributes, indices ) {
var buffer = {};
buffer.buf = [];
buffer.attr = []
for ( var i = 0; i < attributes.length; ++ i ) {
buffer.buf.push( gl.createBuffer() );
buffer.attr.push( { size : attributes[i].attrSize, loc : attributes[i].attrLoc } );
gl.bindBuffer( gl.ARRAY_BUFFER, buffer.buf[i] );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( attributes[i].data ), gl.STATIC_DRAW );
}
buffer.inx = gl.createBuffer();
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, buffer.inx );
gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( indices ), gl.STATIC_DRAW );
buffer.inxLen = indices.length;
gl.bindBuffer( gl.ARRAY_BUFFER, null );
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, null );
return buffer;
}
VertexBuffer.Draw = function( bufObj ) {
for ( var i = 0; i < bufObj.buf.length; ++ i ) {
gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.buf[i] );
gl.vertexAttribPointer( bufObj.attr[i].loc, bufObj.attr[i].size, gl.FLOAT, false, 0, 0 );
gl.enableVertexAttribArray( bufObj.attr[i].loc );
}
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
gl.drawElements( gl.TRIANGLES, bufObj.inxLen, gl.UNSIGNED_SHORT, 0 );
for ( var i = 0; i < bufObj.buf.length; ++ i )
gl.disableVertexAttribArray( bufObj.attr[i].loc );
gl.bindBuffer( gl.ARRAY_BUFFER, null );
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, null );
}
initScene();
})();
html,body {
height: 100%;
width: 100%;
margin: 0;
overflow: hidden;
}
#gui {
position : absolute;
top : 0;
left : 0;
}
<script id="draw-shader-vs" type="x-shader/x-vertex">
precision highp float;
attribute vec3 inPos;
attribute vec3 inNV;
attribute vec3 inCol;
varying vec3 vertPos;
varying vec3 vertNV;
varying vec3 vertCol;
uniform mat4 u_projectionMat44;
uniform mat4 u_viewMat44;
uniform mat4 u_modelMat44;
void main()
{
mat4 mv = u_viewMat44 * u_modelMat44;
vertCol = inCol;
vertNV = normalize(mat3(mv) * inNV);
vec4 viewPos = mv * vec4( inPos, 1.0 );
vertPos = viewPos.xyz;
gl_Position = u_projectionMat44 * viewPos;
}
</script>
<script id="draw-shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec3 vertPos;
varying vec3 vertNV;
varying vec3 vertCol;
uniform float u_shininess;
uniform float u_glow;
void main()
{
vec3 color = vertCol;
vec3 normalV = normalize( vertNV );
vec3 eyeV = normalize( -vertPos );
vec3 halfV = normalize( eyeV + normalV );
float NdotH = max( 0.0, dot( normalV, halfV ) );
float shineFac = ( u_shininess + 2.0 ) * pow( NdotH, u_shininess ) / ( 2.0 * 3.14159265 );
gl_FragColor = vec4( u_glow*0.1 + color.rgb * u_glow * shineFac * 0.5, 1.0 );
}
</script>
<script id="post-shader-vs" type="x-shader/x-vertex">
precision mediump float;
attribute vec2 inPos;
varying vec2 pos;
void main()
{
pos = inPos;
gl_Position = vec4( inPos, 0.0, 1.0 );
}
</script>
<script id="blurX-shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec2 pos;
uniform sampler2D u_texture;
uniform vec2 u_textureSize;
uniform float u_sigma;
float CalcGauss( float x, float sigma )
{
float coeff = 1.0 / (2.0 * 3.14159265 * sigma);
float expon = -(x*x) / (2.0 * sigma);
return (coeff*exp(expon));
}
void main()
{
vec2 texC = pos.st * 0.5 + 0.5;
vec4 texCol = texture2D( u_texture, texC );
vec4 gaussCol = vec4( texCol.rgb, 1.0 );
float stepX = 1.0 / u_textureSize.x;
for ( int i = 1; i <= 20; ++ i )
{
float weight = CalcGauss( float(i) / 32.0, u_sigma * 0.5 );
texCol = texture2D( u_texture, texC + vec2( float(i) * stepX, 0.0 ) );
gaussCol += vec4( texCol.rgb * weight, weight );
texCol = texture2D( u_texture, texC - vec2( float(i) * stepX, 0.0 ) );
gaussCol += vec4( texCol.rgb * weight, weight );
}
gaussCol.rgb /= gaussCol.w;
gl_FragColor = vec4( gaussCol.rgb, 1.0 );
}
</script>
<script id="blurY-shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec2 pos;
uniform sampler2D u_texture;
uniform vec2 u_textureSize;
uniform float u_sigma;
float CalcGauss( float x, float sigma )
{
float coeff = 1.0 / (2.0 * 3.14159265 * sigma);
float expon = -(x*x) / (2.0 * sigma);
return (coeff*exp(expon));
}
void main()
{
vec2 texC = pos.st * 0.5 + 0.5;
vec4 texCol = texture2D( u_texture, texC );
vec4 gaussCol = vec4( texCol.rgb, 1.0 );
float stepY = 1.0 / u_textureSize.y;
for ( int i = 1; i <= 20; ++ i )
{
float weight = CalcGauss( float(i) / 32.0, u_sigma * 0.5 );
texCol = texture2D( u_texture, texC + vec2( 0.0, float(i) * stepY ) );
gaussCol += vec4( texCol.rgb * weight, weight );
texCol = texture2D( u_texture, texC - vec2( 0.0, float(i) * stepY ) );
gaussCol += vec4( texCol.rgb * weight, weight );
}
vec3 hdrCol = 2.0 * gaussCol.xyz / gaussCol.w;
vec3 mappedCol = vec3( 1.0 ) - exp( -hdrCol.rgb * 3.0 );
gl_FragColor = vec4( clamp( mappedCol.rgb, 0.0, 1.0 ), 1.0 );
}
</script>
<div>
<form id="gui" name="inputs">
<table>
<tr> <td> <font color="#CCF">shininess</font> </td>
<td> <input type="range" id="shine" min="0" max="50" value="10" onchange="changeEventHandler(event);"/></td> </tr>
<tr> <td> <font color="#CCF">glow</font> </td>
<td> <input type="range" id="glow" min="100" max="400" value="250" onchange="changeEventHandler(event);"/></td> </tr>
<tr> <td> <font color="#CCF">blur</font> </td>
<td> <input type="range" id="sigma" min="1" max="100" value="60" onchange="changeEventHandler(event);"/></td> </tr>
</table>
</form>
</div>
<canvas id="canvas" style="border: none;" width="100%" height="100%"></canvas>
The GLSL Sandbox website has a collection of shader examples. This one has the glow effect and appears to compile for GLSL ES.
You should be able to modify it to pull UVs from your texture.
Here is some code taken directly from that site:
#ifdef GL_ES
precision mediump float;
#endif
#extension GL_OES_standard_derivatives : enable
uniform float time;
uniform vec2 mouse;
uniform vec2 resolution;
void main(void){
vec2 p = (gl_FragCoord.xy * 2.0 - resolution) / min(resolution.x, resolution.y);
vec3 color1 = vec3(0.0, 0.3, 0.5);
vec3 color2 = vec3(0.5, 0.0, 0.3);
float f = 0.0;
float g = 0.0;
float h = 0.0;
float PI = 3.14159265;
for(float i = 0.0; i < 40.0; i++){
if (floor(mouse.x * 41.0) < i)
break;
float s = sin(time + i * PI / 20.0) * 0.8;
float c = cos(time + i * PI / 20.0) * 0.8;
float d = abs(p.x + c);
float e = abs(p.y + s);
f += 0.001 / d;
g += 0.001 / e;
h += 0.00003 / (d * e);
}
gl_FragColor = vec4(f * color1 + g * color2 + vec3(h), 1.0);
}

DirectX 10 Primitive is not displayed

I am trying to write my first DirectX 10 program that displays a triangle. Everything compiles fine, and the render function is called, since the background changes to black. However, the triangle I'm trying to draw with a triangle strip primitive is not displayed at all.
The Initialization function:
bool InitDirect3D(HWND hWnd, int width, int height)
{
//****** D3DDevice and SwapChain *****//
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(swapChainDesc));
swapChainDesc.BufferCount = 1;
swapChainDesc.BufferDesc.Width = width;
swapChainDesc.BufferDesc.Height = height;
swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swapChainDesc.BufferDesc.RefreshRate.Numerator = 60;
swapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.OutputWindow = hWnd;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.Windowed = TRUE;
if (FAILED(D3D10CreateDeviceAndSwapChain( NULL,
D3D10_DRIVER_TYPE_HARDWARE,
NULL,
0,
D3D10_SDK_VERSION,
&swapChainDesc,
&pSwapChain,
&pD3DDevice)))
return fatalError(TEXT("Hardware does not support DirectX 10!"));
//***** Shader *****//
if (FAILED(D3DX10CreateEffectFromFile( TEXT("basicEffect.fx"),
NULL, NULL,
"fx_4_0",
D3D10_SHADER_ENABLE_STRICTNESS,
0,
pD3DDevice,
NULL,
NULL,
&pBasicEffect,
NULL,
NULL)))
return fatalError(TEXT("Could not load effect file!"));
pBasicTechnique = pBasicEffect->GetTechniqueByName("Render");
pViewMatrixEffectVariable = pBasicEffect->GetVariableByName( "View" )->AsMatrix();
pProjectionMatrixEffectVariable = pBasicEffect->GetVariableByName( "Projection" )->AsMatrix();
pWorldMatrixEffectVariable = pBasicEffect->GetVariableByName( "World" )->AsMatrix();
//***** Input Assembly Stage *****//
D3D10_INPUT_ELEMENT_DESC layout[] =
{
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D10_INPUT_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D10_INPUT_PER_VERTEX_DATA, 0}
};
UINT numElements = 2;
D3D10_PASS_DESC PassDesc;
pBasicTechnique->GetPassByIndex(0)->GetDesc(&PassDesc);
if (FAILED( pD3DDevice->CreateInputLayout( layout,
numElements,
PassDesc.pIAInputSignature,
PassDesc.IAInputSignatureSize,
&pVertexLayout)))
return fatalError(TEXT("Could not create Input Layout."));
pD3DDevice->IASetInputLayout( pVertexLayout );
//***** Vertex buffer *****//
UINT numVertices = 100;
D3D10_BUFFER_DESC bd;
bd.Usage = D3D10_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(vertex) * numVertices;
bd.BindFlags = D3D10_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
bd.MiscFlags = 0;
if (FAILED(pD3DDevice->CreateBuffer(&bd, NULL, &pVertexBuffer)))
return fatalError(TEXT("Could not create vertex buffer!"));;
UINT stride = sizeof(vertex);
UINT offset = 0;
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
//***** Rasterizer *****//
// Set the viewport
viewPort.Width = width;
viewPort.Height = height;
viewPort.MinDepth = 0.0f;
viewPort.MaxDepth = 1.0f;
viewPort.TopLeftX = 0;
viewPort.TopLeftY = 0;
pD3DDevice->RSSetViewports(1, &viewPort);
D3D10_RASTERIZER_DESC rasterizerState;
rasterizerState.CullMode = D3D10_CULL_NONE;
rasterizerState.FillMode = D3D10_FILL_SOLID;
rasterizerState.FrontCounterClockwise = true;
rasterizerState.DepthBias = false;
rasterizerState.DepthBiasClamp = 0;
rasterizerState.SlopeScaledDepthBias = 0;
rasterizerState.DepthClipEnable = true;
rasterizerState.ScissorEnable = false;
rasterizerState.MultisampleEnable = false;
rasterizerState.AntialiasedLineEnable = true;
ID3D10RasterizerState* pRS;
pD3DDevice->CreateRasterizerState(&rasterizerState, &pRS);
pD3DDevice->RSSetState(pRS);
//***** Output Merger *****//
// Get the back buffer from the swapchain
ID3D10Texture2D *pBackBuffer;
if (FAILED(pSwapChain->GetBuffer(0, __uuidof(ID3D10Texture2D), (LPVOID*)&pBackBuffer)))
return fatalError(TEXT("Could not get back buffer."));
// create the render target view
if (FAILED(pD3DDevice->CreateRenderTargetView(pBackBuffer, NULL, &pRenderTargetView)))
return fatalError(TEXT("Could not create the render target view."));
// release the back buffer
pBackBuffer->Release();
// set the render target
pD3DDevice->OMSetRenderTargets(1, &pRenderTargetView, NULL);
return true;
}
The render function:
void Render()
{
if (pD3DDevice != NULL)
{
pD3DDevice->ClearRenderTargetView(pRenderTargetView, D3DXCOLOR(0.0f, 0.0f, 0.0f, 0.0f));
//create world matrix
static float r;
D3DXMATRIX w;
D3DXMatrixIdentity(&w);
D3DXMatrixRotationY(&w, r);
r += 0.001f;
//set effect matrices
pWorldMatrixEffectVariable->SetMatrix(w);
pViewMatrixEffectVariable->SetMatrix(viewMatrix);
pProjectionMatrixEffectVariable->SetMatrix(projectionMatrix);
//fill vertex buffer with vertices
UINT numVertices = 3;
vertex* v = NULL;
//lock vertex buffer for CPU use
pVertexBuffer->Map(D3D10_MAP_WRITE_DISCARD, 0, (void**) &v );
v[0] = vertex( D3DXVECTOR3(-1,-1,0), D3DXVECTOR4(1,0,0,1) );
v[1] = vertex( D3DXVECTOR3(0,1,0), D3DXVECTOR4(0,1,0,1) );
v[2] = vertex( D3DXVECTOR3(1,-1,0), D3DXVECTOR4(0,0,1,1) );
pVertexBuffer->Unmap();
// Set primitive topology
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
//get technique desc
D3D10_TECHNIQUE_DESC techDesc;
pBasicTechnique->GetDesc(&techDesc);
for(UINT p = 0; p < techDesc.Passes; ++p)
{
//apply technique
pBasicTechnique->GetPassByIndex(p)->Apply(0);
//draw
pD3DDevice->Draw(numVertices, 0);
}
pSwapChain->Present(0,0);
}
}
I'm not sure, but try setting:
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
after you unmap the buffer, so it looks like this:
pVertexBuffer->Unmap();
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
// Set primitive topology
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
I suspect that mapping the buffer blows away the buffer binding.
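Putting that suggestion together, here is a minimal sketch of the Render body with the buffer re-bound after Unmap, using only the variables already declared in the question; this is just the suggested call order, not a verified fix:
//fill vertex buffer with vertices
vertex* v = NULL;
pVertexBuffer->Map( D3D10_MAP_WRITE_DISCARD, 0, (void**)&v );
v[0] = vertex( D3DXVECTOR3(-1,-1,0), D3DXVECTOR4(1,0,0,1) );
v[1] = vertex( D3DXVECTOR3( 0, 1,0), D3DXVECTOR4(0,1,0,1) );
v[2] = vertex( D3DXVECTOR3( 1,-1,0), D3DXVECTOR4(0,0,1,1) );
pVertexBuffer->Unmap();
//re-bind the (now unmapped) buffer, then set the topology and draw
UINT stride = sizeof(vertex);
UINT offset = 0;
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
D3D10_TECHNIQUE_DESC techDesc;
pBasicTechnique->GetDesc( &techDesc );
for( UINT p = 0; p < techDesc.Passes; ++p )
{
pBasicTechnique->GetPassByIndex( p )->Apply( 0 );
pD3DDevice->Draw( 3, 0 );
}
pSwapChain->Present( 0, 0 );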

SlimDX (DirectX10) - How to change a texel in Texture?

I am trying to change the texels of a texture which is already loaded.
My assumption was to use the Texture2D::Map and Unmap functions, but nothing changes when I modify the data of the returned DataRectangle.
I need a simple example, e.g. creating a 128x128 texture with a gradient from black to white from each side.
Thanks
PS: A Direct3D 10 C++ example may also help; SlimDX is only a wrapper and has nearly all of the same functions.
This is my D3D10 2D texture loader
bool D3D10Texture::Init( GFXHandler* pHandler, unsigned int usage, unsigned int width, unsigned int height, unsigned int textureType, bool bMipmapped, void* pTextureData )
{
mMipmapped = bMipmapped;
//SetData( pHandler, 0 );
D3D10Handler* pD3DHandler = (D3D10Handler*)pHandler;
ID3D10Device* pDevice = pD3DHandler->GetDevice();
DXGI_SAMPLE_DESC dxgiSampleDesc;
dxgiSampleDesc.Count = 1;
dxgiSampleDesc.Quality = 0;
D3D10_USAGE d3d10Usage;
if ( usage & RU_All_Dynamic ) d3d10Usage = D3D10_USAGE_DYNAMIC;
else d3d10Usage = D3D10_USAGE_DEFAULT;
//unsigned int cpuAccess = D3D10_CPU_ACCESS_WRITE;
//if ( (usage & RU_Buffer_WriteOnly) == 0 ) cpuAccess |= D3D10_CPU_ACCESS_READ;
unsigned int cpuAccess = 0;
if ( !pTextureData )
{
cpuAccess = D3D10_CPU_ACCESS_WRITE;
//if ( (usage & RU_Buffer_WriteOnly) == 0 ) cpuAccess |= D3D10_CPU_ACCESS_READ;
}
unsigned int bindFlags = D3D10_BIND_SHADER_RESOURCE;
if ( usage & RU_Texture_RenderTarget ) bindFlags |= D3D10_BIND_RENDER_TARGET;
unsigned int miscFlags = 0;
if ( usage & RU_Texture_AutoGenMipmap ) miscFlags |= D3D10_RESOURCE_MISC_GENERATE_MIPS;
D3D10_TEXTURE2D_DESC d3d10Texture2DDesc;
d3d10Texture2DDesc.Width = width;
d3d10Texture2DDesc.Height = height;
d3d10Texture2DDesc.MipLevels = GetNumMipMaps( width, height, bMipmapped );
d3d10Texture2DDesc.ArraySize = 1;
d3d10Texture2DDesc.Format = GetD3DFormat( (TextureTypes)textureType );
d3d10Texture2DDesc.SampleDesc = dxgiSampleDesc;
d3d10Texture2DDesc.Usage = d3d10Usage;
d3d10Texture2DDesc.BindFlags = D3D10_BIND_SHADER_RESOURCE;
d3d10Texture2DDesc.CPUAccessFlags = cpuAccess;
d3d10Texture2DDesc.MiscFlags = miscFlags;
//D3D10_SUBRESOURCE_DATA d3d10SubResourceData;
//d3d10SubResourceData.pSysMem = pTextureData;
//d3d10SubResourceData.SysMemPitch = GetPitch( width, (TextureTypes)textureType );
//d3d10SubResourceData.SysMemSlicePitch = 0;
D3D10_SUBRESOURCE_DATA* pSubResourceData = NULL;
if ( pTextureData )
{
pSubResourceData = new D3D10_SUBRESOURCE_DATA[d3d10Texture2DDesc.MipLevels];
char* pTexPos = (char*)pTextureData;
unsigned int pitch = GetPitch( width, (TextureTypes)textureType );
unsigned int count = 0;
unsigned int max = d3d10Texture2DDesc.MipLevels;
while( count < max )
{
pSubResourceData[count].pSysMem = pTexPos;
pSubResourceData[count].SysMemPitch = pitch;
pSubResourceData[count].SysMemSlicePitch = 0;
pTexPos += pitch * height;
pitch >>= 1;
count++;
}
}
if ( FAILED( pDevice->CreateTexture2D( &d3d10Texture2DDesc, pSubResourceData, &mpTexture ) ) )
{
return false;
}
if ( pSubResourceData )
{
delete[] pSubResourceData;
pSubResourceData = NULL;
}
mWidth = width;
mHeight = height;
mFormat = (TextureTypes)textureType;
mpTexture->AddRef();
mpTexture->Release();
D3D10_SHADER_RESOURCE_VIEW_DESC d3d10ShaderResourceViewDesc;
d3d10ShaderResourceViewDesc.Format = d3d10Texture2DDesc.Format;
d3d10ShaderResourceViewDesc.ViewDimension = D3D10_SRV_DIMENSION_TEXTURE2D;
d3d10ShaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
d3d10ShaderResourceViewDesc.Texture2D.MipLevels = GetNumMipMaps( width, height, bMipmapped );
if ( FAILED( pDevice->CreateShaderResourceView( mpTexture, &d3d10ShaderResourceViewDesc, &mpView ) ) )
{
return false;
}
ResourceRecorder::Instance()->AddResource( this );
return true;
}
With that function all you need to do is pass in the white-to-black texture data. For example, to write a 256x256 texture in which each horizontal line is one step brighter than the previous one, the following code will work:
int* pTexture = new int[256 * 256];
int count = 0;
while( count < 256 )
{
int count2 = 0;
while( count2 < 256 )
{
pTexture[(count * 256) + count2] = 0xff000000 | (count << 16) | (count << 8) | count;
count2++;
}
count++;
}
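For completeness, a hypothetical call that hands that buffer to the Init function above might look like the sketch below; pHandler, the TextureTypes value and the zero usage flags are assumptions, since those parts of the engine are not shown in the post:
D3D10Texture texture;
// textureType is assumed to be whichever TextureTypes value maps to a 32-bit
// BGRA format, since the fill loop above packs each pixel as 0xAARRGGBB.
bool ok = texture.Init( pHandler, 0 /* no special usage flags */, 256, 256, textureType, false, pTexture );
delete[] pTexture; // the initial data is copied into the D3D10 resource at creation time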
Make sure you follow the rules in the "Resource Usage Restrictions" section:
MSDN: D3D10_USAGE
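One of those rules is worth calling out: a texture created with D3D10_USAGE_DEFAULT cannot be mapped at all; only dynamic (CPU-write) and staging resources can. If recreating the texture as dynamic is not an option, a rough C++ sketch of the alternative is ID3D10Device::UpdateSubresource with a one-texel box; pDevice, pTex, x and y are assumed to be your device, the texture to change and the texel coordinates, and a 32-bit format such as DXGI_FORMAT_R8G8B8A8_UNORM is assumed:
UINT pixel = 0xffffffff;                  // new texel value (opaque white here)
D3D10_BOX box;
box.left  = x;  box.right  = x + 1;       // destination texel (x, y) in mip level 0
box.top   = y;  box.bottom = y + 1;
box.front = 0;  box.back   = 1;
pDevice->UpdateSubresource( pTex, D3D10CalcSubresource( 0, 0, 1 ), &box, &pixel, sizeof(pixel), 0 );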
public void NewData(byte[] newData)
{
DataRectangle mappedTex = null;
//assign and lock the resource
mappedTex = pTexture.Map(0, D3D10.MapMode.WriteDiscard, D3D10.MapFlags.None);
// make sure the mapped data stream is writable
if (!mappedTex.Data.CanWrite)
{
throw new ApplicationException("Cannot Write to the Texture");
}
// write new data to the texture
mappedTex.Data.WriteRange<byte>(newData);
// unlock the resource
pTexture.Unmap(0);
if (samplerflag)
temptex = newData;
}
This overwrites the whole buffer on every new frame; you may want to use something like D3D10.MapMode.ReadWrite instead if you are only trying to write one texel.
You will also need to write to the DataRectangle at a specific position using one of the other write functions.
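Since the question also asks for a plain Direct3D 10 C++ example, here is a minimal sketch of the Map/Unmap path; it assumes the texture (pTexture2D) is 128x128, was created with D3D10_USAGE_DYNAMIC and D3D10_CPU_ACCESS_WRITE, and uses a 32-bit format. Keep in mind that WRITE_DISCARD throws away the previous contents, so the whole mip level has to be rewritten each time it is mapped:
D3D10_MAPPED_TEXTURE2D mapped;
if ( SUCCEEDED( pTexture2D->Map( D3D10CalcSubresource( 0, 0, 1 ), D3D10_MAP_WRITE_DISCARD, 0, &mapped ) ) )
{
BYTE* pRows = (BYTE*)mapped.pData;
for ( UINT y = 0; y < 128; ++y )
{
// RowPitch can be larger than width * 4, so step row by row using the pitch
UINT* pRow = (UINT*)( pRows + y * mapped.RowPitch );
for ( UINT x = 0; x < 128; ++x )
{
// gradient from black at each edge towards white in the middle, as asked in the question
UINT dx = ( x < 127 - x ) ? x : 127 - x;
UINT dy = ( y < 127 - y ) ? y : 127 - y;
UINT v = ( dx + dy ) * 2;
if ( v > 255 ) v = 255;
pRow[x] = 0xff000000 | ( v << 16 ) | ( v << 8 ) | v;
}
}
pTexture2D->Unmap( D3D10CalcSubresource( 0, 0, 1 ) );
}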

Resources