RDP ActiveX SendKeys Win+L to Lock Screen

I'm working on an application that uses the RDP ActiveX control. I'm trying to use IMsRdpClientNonScriptable::SendKeys to simulate the Win + L combination to lock the screen within the RDP session, but it's not working. Only the L key seems to go through.
long nKeyCount = 4;
VARIANT_BOOL keyUp[4];
long keyData[4];
keyData[0] = 92;      // Win key down
keyData[1] = SC_L;    // L down
keyData[2] = SC_L;    // L up
keyData[3] = 92;      // Win key up
keyUp[0] = VARIANT_FALSE;
keyUp[1] = VARIANT_FALSE;
keyUp[2] = VARIANT_TRUE;
keyUp[3] = VARIANT_TRUE;
rdpclient.SendKeys(nKeyCount, &keyUp[0], &keyData[0]);
Edit: I also tried assigning the values using the WM_KEYDOWN lParam format. That didn't work either.
keyData[0] = WmKeyDown_lParam(92, 0, 1);
keyData[1] = WmKeyDown_lParam(SC_L, 0, 1);
keyData[2] = WmKeyDown_lParam(SC_L, 1, 1);
keyData[3] = WmKeyDown_lParam(92, 1, 1);
long Rdp::WmKeyDown_lParam(uint scanCode, uint prevState, uint extended)
{
    // scan code works
    uint repeatCount = 0;
    uint context = 0;
    uint previousState = prevState;
    uint transition = 0;
    // combine the parameters above according to the bit
    // fields described in the MSDN page for WM_KEYDOWN
    long lParam = repeatCount
        | (scanCode << 16)
        | (extended << 24)
        | (context << 29)
        | (previousState << 30)
        | (transition << 31);
    //lParam = scanCode << 16;
    return lParam;
}
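For reference, here is a sketch of how the Win + L sequence might be built if SendKeys really does accept WM_KEYDOWN/WM_KEYUP lParam-formatted values (the format the edit above tries). Two details worth checking: the repeat-count field is normally 1 rather than 0, and the Windows key is an extended key (left Win is scan code 0x5B), so the extended bit (bit 24) would need to be set for it. MakeKeyLParam and the 0x5B constant are illustrative assumptions, not part of the RDP ActiveX API.
// Sketch only: builds lParam values per the documented WM_KEYDOWN/WM_KEYUP bit layout.
long MakeKeyLParam(unsigned int scanCode, bool extended, bool keyUp)
{
    unsigned int lParam = 1;                 // bits 0-15: repeat count (1 is typical)
    lParam |= (scanCode & 0xFFu) << 16;      // bits 16-23: scan code
    if (extended) lParam |= 1u << 24;        // bit 24: extended-key flag
    if (keyUp)
    {
        lParam |= 1u << 30;                  // bit 30: previous key state
        lParam |= 1u << 31;                  // bit 31: transition state (release)
    }
    return (long)lParam;
}

// Win down, L down, L up, Win up (0x5B = left Win key, assumed here)
keyData[0] = MakeKeyLParam(0x5B, true,  false);
keyData[1] = MakeKeyLParam(SC_L, false, false);
keyData[2] = MakeKeyLParam(SC_L, false, true);
keyData[3] = MakeKeyLParam(0x5B, true,  true);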

Related

Reading GPU resource data by the CPU

I am learning DirectX 11 these days and have been stuck on the compute shader section.
I made four resources and three corresponding views:
immutable input buffer = {1,1,1,1,1} / SRV
immutable input buffer = {2,2,2,2,2} / SRV
output buffer / UAV
staging buffer for reading / no view
I succeeded in creating everything, dispatching the compute shader, copying the data from the output buffer to the staging buffer, and reading/checking the data.
// INPUT BUFFER1--------------------------------------------------
const int dataSize = 5;
D3D11_BUFFER_DESC vb_dest;
vb_dest.ByteWidth = sizeof(float) * dataSize;
vb_dest.StructureByteStride = sizeof(float);
vb_dest.BindFlags = D3D11_BIND_SHADER_RESOURCE;
vb_dest.Usage = D3D11_USAGE_IMMUTABLE;
vb_dest.CPUAccessFlags = 0;
vb_dest.MiscFlags = 0;
float v1_float[dataSize] = { 1,1,1,1,1 };
D3D11_SUBRESOURCE_DATA v1_data;
v1_data.pSysMem = static_cast<void*>(v1_float);
device->CreateBuffer(
&vb_dest,
&v1_data,
valueBuffer1.GetAddressOf());
D3D11_SHADER_RESOURCE_VIEW_DESC srv_desc;
srv_desc.Format = DXGI_FORMAT_R32_FLOAT;
srv_desc.ViewDimension = D3D11_SRV_DIMENSION_BUFFER;
srv_desc.Buffer.FirstElement = 0;
srv_desc.Buffer.NumElements = dataSize;
srv_desc.Buffer.ElementWidth = sizeof(float);
device->CreateShaderResourceView(
valueBuffer1.Get(),
&srv_desc,
inputSRV1.GetAddressOf());
// INPUT BUFFER2-----------------------------------------------------------
float v2_float[dataSize] = { 2,2,2,2,2 };
D3D11_SUBRESOURCE_DATA v2_data;
v2_data.pSysMem = static_cast<void*>(v2_float);
device->CreateBuffer(
&vb_dest,
&v2_data,
valueBuffer2.GetAddressOf());
device->CreateShaderResourceView(
valueBuffer2.Get(),
&srv_desc,
inputSRV2.GetAddressOf());
// OUTPUT BUFFER-----------------------------------------------------------
D3D11_BUFFER_DESC ov_desc;
ov_desc.ByteWidth = sizeof(float) * dataSize;
ov_desc.StructureByteStride = sizeof(float);
ov_desc.BindFlags = D3D11_BIND_UNORDERED_ACCESS;
ov_desc.Usage = D3D11_USAGE_DEFAULT;
ov_desc.CPUAccessFlags = 0;
ov_desc.MiscFlags = 0;
device->CreateBuffer(
&ov_desc,
nullptr,
outputResource.GetAddressOf());
D3D11_UNORDERED_ACCESS_VIEW_DESC outputUAV_desc;
outputUAV_desc.Format = DXGI_FORMAT_R32_FLOAT;
outputUAV_desc.ViewDimension = D3D11_UAV_DIMENSION_BUFFER;
outputUAV_desc.Buffer.FirstElement = 0;
outputUAV_desc.Buffer.NumElements = dataSize;
outputUAV_desc.Buffer.Flags = 0;
device->CreateUnorderedAccessView(
outputResource.Get(),
&outputUAV_desc,
outputUAV.GetAddressOf());
// BUFFER FOR COPY-----------------------------------------------------------
D3D11_BUFFER_DESC rb_desc;
rb_desc.ByteWidth = sizeof(float) * dataSize;
rb_desc.StructureByteStride = sizeof(float);
rb_desc.Usage = D3D11_USAGE_STAGING;
rb_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
rb_desc.BindFlags = 0;
rb_desc.MiscFlags = 0;
device->CreateBuffer(
&rb_desc,
nullptr,
readResource.GetAddressOf());
// DISPATCH and COPY and GET DATA
dContext->CSSetShaderResources(0, 1, inputSRV1.GetAddressOf());
dContext->CSSetShaderResources(1, 1, inputSRV2.GetAddressOf());
dContext->CSSetUnorderedAccessViews(0, 1, outputUAV.GetAddressOf(), nullptr);
dContext->CSSetShader(cs.Get(), nullptr, 0);
dContext->Dispatch(1, 1, 1);
dContext->CopyResource(readResource.Get(), outputResource.Get());
D3D11_MAPPED_SUBRESOURCE mappedResource2;
ZeroMemory(&mappedResource2, sizeof(D3D11_MAPPED_SUBRESOURCE));
R_CHECK(dContext->Map(readResource.Get(), 0, D3D11_MAP_READ, 0, &mappedResource2));
float* data = static_cast<float*>(mappedResource2.pData);
for (int i = 0; i < 5; ++i)
{
int a = data[i];
}
And this is the compute shader code:
StructuredBuffer<float> inputA : register(t0);
StructuredBuffer<float> inputB : register(t1);
RWStructuredBuffer<float> output : register(u0);
[numthreads(5, 1, 1)]
void main(int3 id : SV_DispatchThreadID)
{
output[id.x] = inputA[id.x] + inputB[id.x];
}
In the CS, it adds the two input buffers and stores the result in the output buffer, so the expected answer would be {3,3,3,3,3}.
But the result is {3,0,0,0,0}; only the first index has the proper answer.
Any advice would be amazing.
dContext->CopyResource(readResource.Get(), outputResource.Get());
D3D11_MAPPED_SUBRESOURCE mappedResource2;
ZeroMemory(&mappedResource2, sizeof(D3D11_MAPPED_SUBRESOURCE));
R_CHECK(dContext->Map(readResource.Get(), 0, D3D11_MAP_READ, 0, &mappedResource2));
float* data = static_cast<float*>(mappedResource2.pData);
for (int i = 0; i < 5; ++i)
{
int a = data[i];
}
This code should look like this:
dContext->CopyResource(readResource.Get(), outputResource.Get());
D3D11_MAPPED_SUBRESOURCE mapped = {};
R_CHECK(dContext->Map(readResource.Get(), 0, D3D11_MAP_READ, 0, &mapped));
float data[dataSize] = {};                            // declare and zero a CPU-side copy
memcpy(data, mapped.pData, sizeof(float) * dataSize); // copy out of the mapped resource
dContext->Unmap(readResource.Get(), 0);
For some reason, I have to use memcpy instead of reading the resource directly through the pointer I get from mapping.

D3DX9 Custom mesh overlaps with itself during render

I have a custom model file format that I am reading from to create a model in DX. I use
DWORD dwFVF = ( D3DFVF_XYZ | D3DFVF_NORMAL | D3DFVF_TEX1 );
D3DXCreateMeshFVF(numIndices/3, numVertices, D3DXMESH_MANAGED, dwFVF, *d3ddev, mesh);
to create the mesh, then lock, fill, unlock the index buffer, vertex buffer, and attribute buffer in turn.
void createMeshFromSkn(ifstream* fHandle, LPDIRECT3DDEVICE9 * d3ddev, LPD3DXMESH * mesh)
{
// Start reading the file
int magic = readInt(fHandle);
short version = readShort(fHandle);
short numObjects = readShort(fHandle);
SKNMaterial *materialHeaders;
if (version > 0)
{
// Read in the material headers
int numMaterialHeaders = readInt(fHandle);
fHandle->seekg((16 + MATERIAL_NAME_SIZE) * numMaterialHeaders, ios::cur);
// Read in model data.
int numIndices = readInt(fHandle);
int numVertices = readInt(fHandle);
// Create the mesh
DWORD dwFVF = ( D3DFVF_XYZ | D3DFVF_NORMAL | D3DFVF_TEX1 );
D3DXCreateMeshFVF(numIndices/3, numVertices, D3DXMESH_MANAGED, dwFVF, *d3ddev, mesh);
// Read in the index buffer
WORD* indexBuffer = 0;
(*mesh)->LockIndexBuffer(0, (void**)&indexBuffer);
for (int i = 0; i < numIndices; i++)
{
indexBuffer[i] = readShort(fHandle);
}
(*mesh)->UnlockIndexBuffer();
// Read in the vertexBuffer
D3DVERTEX* vertexBuffer;
(*mesh)->LockVertexBuffer( 0, (void**)&vertexBuffer);
for (int i = 0; i < numVertices; ++i)
{
((D3DVERTEX*)vertexBuffer)[i].position.x = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].position.y = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].position.z = readFloat(fHandle);
for (unsigned int j = 0; j < BONE_INDEX_SIZE; ++j)
{
int bone = (int) readByte(fHandle);
//data->vertices[i].boneIndex[j] = bone;
}
//////////////////////////////////////////////////////////////////////////
//
// Need to fix this to work with bones
//
//////////////////////////////////////////////////////////////////////////
D3DXVECTOR4 weight;
weight.x = readFloat(fHandle);
weight.y = readFloat(fHandle);
weight.z = readFloat(fHandle);
weight.w = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].normal.x = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].normal.y = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].normal.z = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].tu = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].tv = readFloat(fHandle);
}
(*mesh)->UnlockVertexBuffer();
DWORD *pAttribBuf;
HRESULT hRslt = (*mesh)->LockAttributeBuffer(0, &pAttribBuf);
if(hRslt != D3D_OK)
return; // Add error handling
unsigned int numFaces = (*mesh)->GetNumFaces();
for(unsigned int i=0; i<numFaces; i++)
pAttribBuf[i]= 0;
hRslt = (*mesh)->UnlockAttributeBuffer();
if(hRslt != D3D_OK)
return; // Add error handling
DWORD *m_pAdjacencyBuffer;
m_pAdjacencyBuffer = new DWORD[3 * (*mesh)->GetNumFaces()];
(*mesh)->GenerateAdjacency(0.0f, m_pAdjacencyBuffer);
(*mesh)->OptimizeInplace(D3DXMESHOPT_ATTRSORT | D3DXMESHOPT_VERTEXCACHE, m_pAdjacencyBuffer, NULL, NULL, NULL);
}
return;
}
My problem is that the model is overlapping with itself:
http://imageshack.us/a/img210/2732/20121018181019896.png
I have CCW backface culling enabled:
d3ddev->SetRenderState(D3DRS_CULLMODE, D3DCULL_CCW);
I also have z-buffer enabled, but I'm pretty sure that's only between two meshes, not between a mesh and itself.
I've spent the last day and a half trying to Google for a solution, but I couldn't find anything. Any help or links to help would be greatly appreciated.
It turns out I hadn't actually turned on Z-buffering, because I needed to turn it on in the d3d presentation parameters:
d3dpp.EnableAutoDepthStencil = TRUE;
d3dpp.AutoDepthStencilFormat = D3DFMT_D16;
Once I did that and added a
d3ddev->Clear(0, NULL, D3DCLEAR_ZBUFFER, D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);
to the render loop, it renders correctly.
Wow, so glad I figured this out. I hope this helps others in their explorations of DX.
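For anyone hitting the same thing, here is a minimal sketch of the depth-buffer pieces described above pulled together: the presentation parameters, an explicit Z-enable render state (EnableAutoDepthStencil already turns it on, so that line is just belt and braces), and clearing both color and depth each frame. d3dpp and d3ddev are the usual D3D9 presentation parameters and device from the original setup.
// Request an automatic depth/stencil buffer when creating the device.
d3dpp.EnableAutoDepthStencil = TRUE;
d3dpp.AutoDepthStencilFormat = D3DFMT_D16;

// Depth testing on (EnableAutoDepthStencil enables it by default).
d3ddev->SetRenderState(D3DRS_ZENABLE, D3DZB_TRUE);

// Clear both the render target and the depth buffer at the start of each frame.
d3ddev->Clear(0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
              D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);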

What does the DeviceIoControl API function do?

Please explain what this VC++ code does. Is it possible to convert this code to Delphi 2010?
void CDMOnLineView::OnActionGetdata()
{
bool retCode;
DWORD retByte = 0;
int TmpHigh, TmpLow;
UCHAR HIDData[64];
int LastX, LastY;
UCHAR Button;
CDC* pViewDC = GetDC();
if(yPos > 500) yPos = 0;
else yPos = yPos + 16;
if(hDriver == NULL)
{
pViewDC->TextOut(10,yPos,"Driver not connect yet.");
}
else
{
IO_Param.CallerHandle = m_hWnd;
IO_Param.Model = DM_A4;
retCode = DeviceIoControl(hDriver, IOCTL_DM_READ_DATA, &IO_Param, sizeof(DM_PARAM), HIDData,
6, &retByte, NULL);
if(retCode)
{
if(retByte != 0)
{
Button = HIDData[1] & 0x01;
TmpLow = (int)HIDData[2];
TmpHigh = (int)HIDData[3];
LastX = (TmpLow & 0x00FF) | ((TmpHigh << 8) & 0xFF00);
TmpLow = (int)HIDData[4];
TmpHigh = (int)HIDData[5];
LastY = (TmpLow & 0x00FF) | ((TmpHigh << 8) & 0xFF00);
sprintf(szStringBuffer, "Button: %d, X: %.5d, Y: %.5d", Button, LastX, LastY);
pViewDC->TextOut(10,yPos,szStringBuffer, strlen(szStringBuffer));
}
else pViewDC->TextOut(10,yPos,"Return bytes incorrect.");
}
else
{
ErrorCode = GetLastError();
sprintf(szStringBuffer, "Call IOCTL_DM_READ_DATA fail. Error: %d", ErrorCode);
pViewDC->TextOut(10,yPos,szStringBuffer, strlen(szStringBuffer));
}
}
ReleaseDC(pViewDC);
}
What does the DeviceIoControl function do? Please explain the parameters as well.
Thanks, all.
Here's the "translation" of all the bitwise operations in the code; hopefully these will get you going.
The operators you need to know about:
& is the bitwise AND operator.
| is the bitwise OR operator.
<< is the bitwise SHIFT LEFT operator.
The translations:
Button = HIDData[1] & 0x01; // C
Button := HIDData[1] and $01; // Delphi
TmpLow = (int)HIDData[2]; // C
TmpLow := Integer(HIDData[2]); // Delphi
TmpHigh = (int)HIDData[3]; // C
TmpHigh := Integer(HIDData[3]); // Delphi
LastX = (TmpLow & 0x00FF) | ((TmpHigh << 8) & 0xFF00); // C
LastX := (TmpLow and $00FF) or ((TmpHigh shl 8) and $FF00); // Delphi
TmpLow = (int)HIDData[4]; // C
TmpLow := Integer(HIDData[4]); // Delphi
TmpHigh = (int)HIDData[5]; // C
TmpHigh := Integer(HIDData[5]); // Delphi
LastY = (TmpLow & 0x00FF) | ((TmpHigh << 8) & 0xFF00); // C
LastY := (TmpLow and $00FF) or ((TmpHigh shl 8) and $FF00); // Delphi
sprintf(szStringBuffer, "Button: %d, X: %.5d, Y: %.5d", Button, LastX, LastY); // C
pViewDC->TextOut(10,yPos,szStringBuffer, strlen(szStringBuffer)); // C
Caption := Format('Button: %d, x: %.5d, y: %.5d', [Button, LastX, LastY]); // Delphi
DeviceIoControl calls a custom driver function. A driver is a kernel-mode program representing some device in the computer. Drivers have standard operations (such as open, close, read, and write, which are called through the CreateFile, CloseHandle, ReadFile, and WriteFile APIs) and custom driver-specific operations, which are called through DeviceIoControl. Details about these operations are described in the driver's documentation.
Every custom operation has a generic interface: an operation code plus input and output buffers, which may contain any information.
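As a rough illustration of that generic interface, here is a minimal sketch of opening a device and issuing a custom operation. The device path \\.\MyDevice and the control code are made-up placeholders; the real values come from the driver's own header and documentation.
#include <windows.h>
#include <winioctl.h>
#include <stdio.h>

int main()
{
    // Open a handle to the device (placeholder path).
    HANDLE hDriver = CreateFileA("\\\\.\\MyDevice",
                                 GENERIC_READ | GENERIC_WRITE,
                                 0, NULL, OPEN_EXISTING, 0, NULL);
    if (hDriver == INVALID_HANDLE_VALUE)
    {
        printf("CreateFile failed: %lu\n", GetLastError());
        return 1;
    }

    // Operation code: made up here with CTL_CODE; a real driver publishes its own.
    const DWORD IOCTL_MY_READ = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x800,
                                         METHOD_BUFFERED, FILE_ANY_ACCESS);
    BYTE  inBuf[8]   = { 0 };   // whatever the operation expects as input
    BYTE  outBuf[64] = { 0 };   // buffer the driver fills with its output
    DWORD bytesReturned = 0;

    BOOL ok = DeviceIoControl(hDriver,                 // handle from CreateFile
                              IOCTL_MY_READ,           // operation (control) code
                              inBuf,  sizeof(inBuf),   // input buffer and its size
                              outBuf, sizeof(outBuf),  // output buffer and its size
                              &bytesReturned,          // bytes actually written to outBuf
                              NULL);                   // no OVERLAPPED: synchronous call
    if (!ok)
        printf("DeviceIoControl failed: %lu\n", GetLastError());

    CloseHandle(hDriver);
    return 0;
}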
The DeviceIoControl function is documented at MSDN. User mode programs use it to interact with device drivers.
Converting this code is pretty simple. The call to DeviceIoControl maps across trivially. The only area that you are likely to struggle with is the C bitwise operations. If you don't have a copy of K&R to hand, then you should!

DirectX 10 Primitive is not displayed

I am trying to write my first DirectX 10 program that displays a triangle. Everything compiles fine, and the render function is called, since the background changes to black. However, the triangle I'm trying to draw with a triangle strip primitive is not displayed at all.
The Initialization function:
bool InitDirect3D(HWND hWnd, int width, int height)
{
//****** D3DDevice and SwapChain *****//
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(swapChainDesc));
swapChainDesc.BufferCount = 1;
swapChainDesc.BufferDesc.Width = width;
swapChainDesc.BufferDesc.Height = height;
swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swapChainDesc.BufferDesc.RefreshRate.Numerator = 60;
swapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.OutputWindow = hWnd;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.Windowed = TRUE;
if (FAILED(D3D10CreateDeviceAndSwapChain( NULL,
D3D10_DRIVER_TYPE_HARDWARE,
NULL,
0,
D3D10_SDK_VERSION,
&swapChainDesc,
&pSwapChain,
&pD3DDevice)))
return fatalError(TEXT("Hardware does not support DirectX 10!"));
//***** Shader *****//
if (FAILED(D3DX10CreateEffectFromFile( TEXT("basicEffect.fx"),
NULL, NULL,
"fx_4_0",
D3D10_SHADER_ENABLE_STRICTNESS,
0,
pD3DDevice,
NULL,
NULL,
&pBasicEffect,
NULL,
NULL)))
return fatalError(TEXT("Could not load effect file!"));
pBasicTechnique = pBasicEffect->GetTechniqueByName("Render");
pViewMatrixEffectVariable = pBasicEffect->GetVariableByName( "View" )->AsMatrix();
pProjectionMatrixEffectVariable = pBasicEffect->GetVariableByName( "Projection" )->AsMatrix();
pWorldMatrixEffectVariable = pBasicEffect->GetVariableByName( "World" )->AsMatrix();
//***** Input Assembly Stage *****//
D3D10_INPUT_ELEMENT_DESC layout[] =
{
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D10_INPUT_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D10_INPUT_PER_VERTEX_DATA, 0}
};
UINT numElements = 2;
D3D10_PASS_DESC PassDesc;
pBasicTechnique->GetPassByIndex(0)->GetDesc(&PassDesc);
if (FAILED( pD3DDevice->CreateInputLayout( layout,
numElements,
PassDesc.pIAInputSignature,
PassDesc.IAInputSignatureSize,
&pVertexLayout)))
return fatalError(TEXT("Could not create Input Layout."));
pD3DDevice->IASetInputLayout( pVertexLayout );
//***** Vertex buffer *****//
UINT numVertices = 100;
D3D10_BUFFER_DESC bd;
bd.Usage = D3D10_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(vertex) * numVertices;
bd.BindFlags = D3D10_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
bd.MiscFlags = 0;
if (FAILED(pD3DDevice->CreateBuffer(&bd, NULL, &pVertexBuffer)))
return fatalError(TEXT("Could not create vertex buffer!"));;
UINT stride = sizeof(vertex);
UINT offset = 0;
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
//***** Rasterizer *****//
// Set the viewport
viewPort.Width = width;
viewPort.Height = height;
viewPort.MinDepth = 0.0f;
viewPort.MaxDepth = 1.0f;
viewPort.TopLeftX = 0;
viewPort.TopLeftY = 0;
pD3DDevice->RSSetViewports(1, &viewPort);
D3D10_RASTERIZER_DESC rasterizerState;
rasterizerState.CullMode = D3D10_CULL_NONE;
rasterizerState.FillMode = D3D10_FILL_SOLID;
rasterizerState.FrontCounterClockwise = true;
rasterizerState.DepthBias = false;
rasterizerState.DepthBiasClamp = 0;
rasterizerState.SlopeScaledDepthBias = 0;
rasterizerState.DepthClipEnable = true;
rasterizerState.ScissorEnable = false;
rasterizerState.MultisampleEnable = false;
rasterizerState.AntialiasedLineEnable = true;
ID3D10RasterizerState* pRS;
pD3DDevice->CreateRasterizerState(&rasterizerState, &pRS);
pD3DDevice->RSSetState(pRS);
//***** Output Merger *****//
// Get the back buffer from the swapchain
ID3D10Texture2D *pBackBuffer;
if (FAILED(pSwapChain->GetBuffer(0, __uuidof(ID3D10Texture2D), (LPVOID*)&pBackBuffer)))
return fatalError(TEXT("Could not get back buffer."));
// create the render target view
if (FAILED(pD3DDevice->CreateRenderTargetView(pBackBuffer, NULL, &pRenderTargetView)))
return fatalError(TEXT("Could not create the render target view."));
// release the back buffer
pBackBuffer->Release();
// set the render target
pD3DDevice->OMSetRenderTargets(1, &pRenderTargetView, NULL);
return true;
}
The render function:
void Render()
{
if (pD3DDevice != NULL)
{
pD3DDevice->ClearRenderTargetView(pRenderTargetView, D3DXCOLOR(0.0f, 0.0f, 0.0f, 0.0f));
//create world matrix
static float r;
D3DXMATRIX w;
D3DXMatrixIdentity(&w);
D3DXMatrixRotationY(&w, r);
r += 0.001f;
//set effect matrices
pWorldMatrixEffectVariable->SetMatrix(w);
pViewMatrixEffectVariable->SetMatrix(viewMatrix);
pProjectionMatrixEffectVariable->SetMatrix(projectionMatrix);
//fill vertex buffer with vertices
UINT numVertices = 3;
vertex* v = NULL;
//lock vertex buffer for CPU use
pVertexBuffer->Map(D3D10_MAP_WRITE_DISCARD, 0, (void**) &v );
v[0] = vertex( D3DXVECTOR3(-1,-1,0), D3DXVECTOR4(1,0,0,1) );
v[1] = vertex( D3DXVECTOR3(0,1,0), D3DXVECTOR4(0,1,0,1) );
v[2] = vertex( D3DXVECTOR3(1,-1,0), D3DXVECTOR4(0,0,1,1) );
pVertexBuffer->Unmap();
// Set primitive topology
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
//get technique desc
D3D10_TECHNIQUE_DESC techDesc;
pBasicTechnique->GetDesc(&techDesc);
for(UINT p = 0; p < techDesc.Passes; ++p)
{
//apply technique
pBasicTechnique->GetPassByIndex(p)->Apply(0);
//draw
pD3DDevice->Draw(numVertices, 0);
}
pSwapChain->Present(0,0);
}
}
I'm not sure, but try setting:
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
after you unmap the buffer, to get something like this:
pVertexBuffer->Unmap();
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
// Set primitive topology
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
I suspect that locking blows away the buffer binding.

SlimDX (DirectX10) - How to change a texel in Texture?

I'm trying to change the texels of a texture that is already loaded.
My assumption was to use the Texture2D::Map and Unmap functions, but nothing changes when I modify the data of the given DataRectangle.
I need a simple example, like creating a 128x128 texture with a gradient from black to white from each side.
Thanks.
PS: A Direct3D 10 C++ example may also help; SlimDX is only a wrapper and has nearly the same functions.
This is my D3D10 2D texture loader
bool D3D10Texture::Init( GFXHandler* pHandler, unsigned int usage, unsigned int width, unsigned int height, unsigned int textureType, bool bMipmapped, void* pTextureData )
{
mMipmapped = bMipmapped;
//SetData( pHandler, 0 );
D3D10Handler* pD3DHandler = (D3D10Handler*)pHandler;
ID3D10Device* pDevice = pD3DHandler->GetDevice();
DXGI_SAMPLE_DESC dxgiSampleDesc;
dxgiSampleDesc.Count = 1;
dxgiSampleDesc.Quality = 0;
D3D10_USAGE d3d10Usage;
if ( usage & RU_All_Dynamic ) d3d10Usage = D3D10_USAGE_DYNAMIC;
else d3d10Usage = D3D10_USAGE_DEFAULT;
//unsigned int cpuAccess = D3D10_CPU_ACCESS_WRITE;
//if ( (usage & RU_Buffer_WriteOnly) == 0 ) cpuAccess |= D3D10_CPU_ACCESS_READ;
unsigned int cpuAccess = 0;
if ( !pTextureData )
{
cpuAccess = D3D10_CPU_ACCESS_WRITE;
//if ( (usage & RU_Buffer_WriteOnly) == 0 ) cpuAccess |= D3D10_CPU_ACCESS_READ;
}
unsigned int bindFlags = D3D10_BIND_SHADER_RESOURCE;
if ( usage & RU_Texture_RenderTarget ) bindFlags |= D3D10_BIND_RENDER_TARGET;
unsigned int miscFlags = 0;
if ( usage & RU_Texture_AutoGenMipmap ) miscFlags |= D3D10_RESOURCE_MISC_GENERATE_MIPS;
D3D10_TEXTURE2D_DESC d3d10Texture2DDesc;
d3d10Texture2DDesc.Width = width;
d3d10Texture2DDesc.Height = height;
d3d10Texture2DDesc.MipLevels = GetNumMipMaps( width, height, bMipmapped );
d3d10Texture2DDesc.ArraySize = 1;
d3d10Texture2DDesc.Format = GetD3DFormat( (TextureTypes)textureType );
d3d10Texture2DDesc.SampleDesc = dxgiSampleDesc;
d3d10Texture2DDesc.Usage = d3d10Usage;
d3d10Texture2DDesc.BindFlags = D3D10_BIND_SHADER_RESOURCE;
d3d10Texture2DDesc.CPUAccessFlags = cpuAccess;
d3d10Texture2DDesc.MiscFlags = miscFlags;
//D3D10_SUBRESOURCE_DATA d3d10SubResourceData;
//d3d10SubResourceData.pSysMem = pTextureData;
//d3d10SubResourceData.SysMemPitch = GetPitch( width, (TextureTypes)textureType );
//d3d10SubResourceData.SysMemSlicePitch = 0;
D3D10_SUBRESOURCE_DATA* pSubResourceData = NULL;
if ( pTextureData )
{
pSubResourceData = new D3D10_SUBRESOURCE_DATA[d3d10Texture2DDesc.MipLevels];
char* pTexPos = (char*)pTextureData;
unsigned int pitch = GetPitch( width, (TextureTypes)textureType );
unsigned int count = 0;
unsigned int max = d3d10Texture2DDesc.MipLevels;
while( count < max )
{
pSubResourceData[count].pSysMem = pTexPos;
pSubResourceData[count].SysMemPitch = pitch;
pSubResourceData[count].SysMemSlicePitch = 0;
pTexPos += pitch * height;
pitch >>= 1;
count++;
}
}
if ( FAILED( pDevice->CreateTexture2D( &d3d10Texture2DDesc, pSubResourceData, &mpTexture ) ) )
{
return false;
}
if ( pSubResourceData )
{
delete[] pSubResourceData;
pSubResourceData = NULL;
}
mWidth = width;
mHeight = height;
mFormat = (TextureTypes)textureType;
mpTexture->AddRef();
mpTexture->Release();
D3D10_SHADER_RESOURCE_VIEW_DESC d3d10ShaderResourceViewDesc;
d3d10ShaderResourceViewDesc.Format = d3d10Texture2DDesc.Format;
d3d10ShaderResourceViewDesc.ViewDimension = D3D10_SRV_DIMENSION_TEXTURE2D;
d3d10ShaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
d3d10ShaderResourceViewDesc.Texture2D.MipLevels = GetNumMipMaps( width, height, bMipmapped );
if ( FAILED( pDevice->CreateShaderResourceView( mpTexture, &d3d10ShaderResourceViewDesc, &mpView ) ) )
{
return false;
}
ResourceRecorder::Instance()->AddResource( this );
return true;
}
With that function, all you need to do is pass in the white-to-black texture data. For example, to write a 256x256 texture with each horizontal line one brighter than the previous line, the following code will work:
int* pTexture = new int[256 * 256];
int count = 0;
while( count < 256 )
{
int count2 = 0;
while( count2 < 256 )
{
pTexture[(count * 256) + count2] = 0xff000000 | (count << 16) | (count << 8) | count;
count2++;
}
count++;
}
Make sure you follow the rules in the "Resource Usage Restrictions" section:
MSDN: D3D10_USAGE
public void NewData(byte[] newData)
{
DataRectangle mappedTex = null;
//assign and lock the resource
mappedTex = pTexture.Map(0, D3D10.MapMode.WriteDiscard, D3D10.MapFlags.None);
// if we cannot write to the texture
if (!mappedTex.Data.CanWrite)
{
throw new ApplicationException("Cannot Write to the Texture");
}
// write new data to the texture
mappedTex.Data.WriteRange<byte>(newData);
// unlock the resource
pTexture.Unmap(0);
if (samplerflag)
temptex = newData;
}
This overwrites the buffer on every new frame; you may want to use D3D10.MapMode.ReadWrite or something similar if you're only trying to write one texel.
You will also need to write to the DataRectangle at a specific point using one of the other write functions.
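Since the question also asked for a plain Direct3D 10 C++ example, here is a sketch of poking a single texel through Map. It assumes the texture was created with D3D10_USAGE_DYNAMIC, D3D10_CPU_ACCESS_WRITE and a 4-byte format; because WRITE_DISCARD throws away the previous contents, the whole level is refilled from a CPU-side copy first. WriteTexel and cpuCopy are illustrative names, not anything from SlimDX or the code above.
#include <d3d10.h>
#include <cstring>

// pTexture: ID3D10Texture2D* created with D3D10_USAGE_DYNAMIC,
// D3D10_CPU_ACCESS_WRITE and a 4-byte format such as DXGI_FORMAT_R8G8B8A8_UNORM.
void WriteTexel(ID3D10Texture2D* pTexture, const UINT* cpuCopy,
                UINT texWidth, UINT texHeight, UINT x, UINT y, UINT rgba)
{
    D3D10_MAPPED_TEXTURE2D mapped;
    // Subresource 0 = top mip level of the first array slice.
    if (FAILED(pTexture->Map(0, D3D10_MAP_WRITE_DISCARD, 0, &mapped)))
        return;

    // RowPitch may be larger than texWidth * 4, so copy row by row.
    BYTE* dst = static_cast<BYTE*>(mapped.pData);
    for (UINT row = 0; row < texHeight; ++row)
        memcpy(dst + row * mapped.RowPitch,
               cpuCopy + row * texWidth,
               texWidth * sizeof(UINT));

    // Overwrite the one texel we actually wanted to change.
    UINT* rowPtr = reinterpret_cast<UINT*>(dst + y * mapped.RowPitch);
    rowPtr[x] = rgba;

    pTexture->Unmap(0);
}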
