How can I set the focus of a webcam (or any other camera) from OpenCV? I would like to find the distance to an object, but I want to write the program myself, so I need to be able to focus manually, meaning from code.
I'm using Logitech webcams (tested with the C525, C920 and C930e). Key 28 (cv2.CAP_PROP_FOCUS) sets the focus. Note that the focus value should be a multiple of 5 (0, 5, 10, ..., 255); otherwise the VideoCapture object simply won't respond.
import cv2
cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_AUTOFOCUS, 0) # assumption: on some models autofocus must be disabled or it overrides the manual value
focus = 0 # min: 0, max: 255, increment: 5
cam.set(cv2.CAP_PROP_FOCUS, focus) # cv2.CAP_PROP_FOCUS == 28
You can't set focus from OpenCV, but the Windows SDK allows it. Take a look at: http://msdn.microsoft.com/en-us/library/windows/hardware/ff567802(v=vs.85).aspx
I've used the minidriver properties for focus control, and it works perfectly with the Logitech 905c and 920c.
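For focus specifically, here is a minimal sketch of what that looks like through DirectShow's IAMCameraControl interface (my reconstruction, since the full sample below only exercises pan/tilt/zoom; CameraControl_Focus and CameraControl_Flags_Manual are the standard DirectShow names):
HRESULT set_manual_focus(IBaseFilter *pBaseFilter, long value)
{
    IAMCameraControl *pCameraControl = NULL;
    HRESULT hr = pBaseFilter->QueryInterface(IID_IAMCameraControl, (void **)&pCameraControl);
    if(hr != S_OK)
        return hr;
    // Query the valid range first; not every camera exposes a focus control.
    long min = 0, max = 0, step = 0, def = 0, caps = 0;
    hr = pCameraControl->GetRange(CameraControl_Focus, &min, &max, &step, &def, &caps);
    if(hr == S_OK)
        hr = pCameraControl->Set(CameraControl_Focus, value, CameraControl_Flags_Manual);
    pCameraControl->Release();
    return hr;
}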
I found this code example on my disk; hope it'll be useful:
/*****************************************************************************
* DirectShow Pan/Tilt/Zoom sample for Logitech QuickCam devices
*
* Copyright 2007 (c) Logitech. All Rights Reserved.
*
* This code and information is provided "as is" without warranty of
* any kind, either expressed or implied, including but not limited to
* the implied warranties of merchantability and/or fitness for a
* particular purpose.
*
* Version: 1.1
****************************************************************************/
#include <stdio.h> // For printf/fprintf
#include <string.h> // For strcat_s
#include <dshow.h>
#include <Ks.h> // Required by KsMedia.h
#include <KsMedia.h> // For KSPROPERTY_CAMERACONTROL_FLAGS_*
struct ControlInfo {
long min;
long max;
long step;
long def;
long flags;
};
/*
* Print information about a control in an easily readable fashion.
*/
void print_control_info(ControlInfo *info)
{
char flags[32] = "";
if(info->flags & KSPROPERTY_CAMERACONTROL_FLAGS_AUTO)
{
strcat_s(flags, sizeof(flags), "AUTO | ");
}
else if(info->flags & KSPROPERTY_CAMERACONTROL_FLAGS_MANUAL)
{
strcat_s(flags, sizeof(flags), "MANUAL | ");
}
if(info->flags & KSPROPERTY_CAMERACONTROL_FLAGS_RELATIVE)
{
strcat_s(flags, sizeof(flags), "RELATIVE");
}
else
{
strcat_s(flags, sizeof(flags), "ABSOLUTE");
}
printf(
" min: %d\n"
" max: %d\n"
" step: %d\n"
" def: %d\n"
" flags: 0x%08X (%s)\n",
info->min, info->max, info->step, info->def, info->flags, flags
);
}
/*
* Pans the camera by a given angle.
*
* The angle is given in degrees, positive values are clockwise rotation (seen from the top),
* negative values are counter-clockwise rotation. If the "Mirror horizontal" option is
* enabled, the panning sense is reversed.
*/
HRESULT set_mechanical_pan_relative(IAMCameraControl *pCameraControl, long value)
{
HRESULT hr = 0;
long flags = KSPROPERTY_CAMERACONTROL_FLAGS_RELATIVE | KSPROPERTY_CAMERACONTROL_FLAGS_MANUAL;
hr = pCameraControl->Set(CameraControl_Pan, value, flags);
if(hr != S_OK)
fprintf(stderr, "ERROR: Unable to set CameraControl_Pan property value to %d. (Error 0x%08X)\n", value, hr);
// Note that we need to wait until the movement is complete, otherwise the next request will
// fail with hr == 0x800700AA == HRESULT_FROM_WIN32(ERROR_BUSY).
Sleep(500);
return hr;
}
/*
* Tilts the camera by a given angle.
*
* The angle is given in degrees, positive values are downwards, negative values are upwards.
* If the "Mirror vertical" option is enabled, the tilting sense is reversed.
*/
HRESULT set_mechanical_tilt_relative(IAMCameraControl *pCameraControl, long value)
{
HRESULT hr = 0;
long flags = KSPROPERTY_CAMERACONTROL_FLAGS_RELATIVE | KSPROPERTY_CAMERACONTROL_FLAGS_MANUAL;
hr = pCameraControl->Set(CameraControl_Tilt, value, flags);
if(hr != S_OK)
fprintf(stderr, "ERROR: Unable to set CameraControl_Tilt property value to %d. (Error 0x%08X)\n", value, hr);
// Note that we need to wait until the movement is complete, otherwise the next request will
// fail with hr == 0x800700AA == HRESULT_FROM_WIN32(ERROR_BUSY).
Sleep(500);
return hr;
}
/*
* Resets the camera's pan/tilt position by moving into a corner and then back to the center.
*/
void reset_mechanical_pan_tilt(IAMCameraControl *pCameraControl)
{
set_mechanical_pan_relative(pCameraControl, 180);
Sleep(500);
set_mechanical_tilt_relative(pCameraControl, 180);
Sleep(500);
set_mechanical_pan_relative(pCameraControl, -64);
Sleep(500);
set_mechanical_tilt_relative(pCameraControl, -24);
Sleep(500);
}
/*
* Sets the digital pan angle.
*
* Positive values pan to the right, negative values pan to the left. Note that the digital pan
* angle only has an influence if the digital zoom is active.
*/
HRESULT set_digital_pan_absolute(IAMCameraControl *pCameraControl, long value)
{
HRESULT hr = 0;
// Specifying the KSPROPERTY_CAMERACONTROL_FLAGS_ABSOLUTE flag instructs the driver
// to use digital instead of mechanical pan.
long flags = KSPROPERTY_CAMERACONTROL_FLAGS_ABSOLUTE | KSPROPERTY_CAMERACONTROL_FLAGS_MANUAL;
hr = pCameraControl->Set(CameraControl_Pan, value, flags);
if(hr != S_OK)
fprintf(stderr, "ERROR: Unable to set CameraControl_Pan property value to %d. (Error 0x%08X)\n", value, hr);
return hr;
}
/*
* Sets the digital tilt angle.
*
* Positive values tilt downwards, negative values tilt upwards. Note that the digital pan
* angle only has an influence if the digital zoom is active.
*/
HRESULT set_digital_tilt_absolute(IAMCameraControl *pCameraControl, long value)
{
HRESULT hr = 0;
// Specifying the KSPROPERTY_CAMERACONTROL_FLAGS_ABSOLUTE flag instructs the driver
// to use digital instead of mechanical tilt.
long flags = KSPROPERTY_CAMERACONTROL_FLAGS_ABSOLUTE | KSPROPERTY_CAMERACONTROL_FLAGS_MANUAL;
hr = pCameraControl->Set(CameraControl_Tilt, value, flags);
if(hr != S_OK)
fprintf(stderr, "ERROR: Unable to set CameraControl_Tilt property value to %d. (Error 0x%08X)\n", value, hr);
return hr;
}
/*
* Sets the digital zoom value.
*
* The minimum value is 50 and means no zoom (100%). The maximum value is 200
* and means 4x zoom (400%).
*/
HRESULT set_digital_zoom_absolute(IAMCameraControl *pCameraControl, long value)
{
HRESULT hr = 0;
long flags = KSPROPERTY_CAMERACONTROL_FLAGS_ABSOLUTE | KSPROPERTY_CAMERACONTROL_FLAGS_MANUAL;
hr = pCameraControl->Set(CameraControl_Zoom, value, flags);
if(hr != S_OK)
fprintf(stderr, "ERROR: Unable to set CameraControl_Zoom property value to %d. (Error 0x%08X)\n", value, hr);
return hr;
}
/*
* Resets the digital pan and tilt angles.
*/
void reset_digital_pan_tilt(IAMCameraControl *pCameraControl)
{
set_digital_pan_absolute(pCameraControl, 0);
set_digital_tilt_absolute(pCameraControl, 0);
}
/*
* Resets the digital zoom.
*/
void reset_digital_zoom(IAMCameraControl *pCameraControl)
{
set_digital_zoom_absolute(pCameraControl, 50);
}
/*
* Test a camera's pan/tilt properties
*
* See also:
*
* IAMCameraControl Interface
* http://msdn2.microsoft.com/en-us/library/ms783833.aspx
* PROPSETID_VIDCAP_CAMERACONTROL
* http://msdn2.microsoft.com/en-us/library/aa510754.aspx
*/
HRESULT test_pan_tilt(IBaseFilter *pBaseFilter)
{
HRESULT hr = 0;
IAMCameraControl *pCameraControl = NULL;
ControlInfo panInfo = { 0 };
ControlInfo tiltInfo = { 0 };
ControlInfo zoomInfo = { 0 };
long value = 0, flags = 0;
printf(" Reading pan/tilt property information ...\n");
// Get a pointer to the IAMCameraControl interface used to control the camera
hr = pBaseFilter->QueryInterface(IID_IAMCameraControl, (void **)&pCameraControl);
if(hr != S_OK)
{
fprintf(stderr, "ERROR: Unable to access IAMCameraControl interface.\n");
return hr;
}
// Retrieve information about the pan and tilt controls
hr = pCameraControl->GetRange(CameraControl_Pan, &panInfo.min, &panInfo.max, &panInfo.step, &panInfo.def, &panInfo.flags);
if(hr != S_OK)
{
fprintf(stderr, "ERROR: Unable to retrieve CameraControl_Pan property information.\n");
return hr;
}
printf(" Pan control:\n");
print_control_info(&panInfo);
hr = pCameraControl->GetRange(CameraControl_Tilt, &tiltInfo.min, &tiltInfo.max, &tiltInfo.step, &tiltInfo.def, &tiltInfo.flags);
if(hr != S_OK)
{
fprintf(stderr, "ERROR: Unable to retrieve CameraControl_Tilt property information.\n");
return hr;
}
printf(" Tilt control:\n");
print_control_info(&tiltInfo);
hr = pCameraControl->GetRange(CameraControl_Zoom, &zoomInfo.min, &zoomInfo.max, &zoomInfo.step, &zoomInfo.def, &zoomInfo.flags);
if(hr != S_OK)
{
fprintf(stderr, "ERROR: Unable to retrieve CameraControl_Zoom property information.\n");
return hr;
}
printf(" Zoom control:\n");
print_control_info(&zoomInfo);
//*
printf(" Resetting pan/tilt/zoom ...\n");
reset_mechanical_pan_tilt(pCameraControl);
reset_digital_pan_tilt(pCameraControl);
reset_digital_zoom(pCameraControl);
Sleep(3000);
//*/
//*
printf(" Testing mechanical pan ...\n");
set_mechanical_pan_relative(pCameraControl, 40);
set_mechanical_pan_relative(pCameraControl, 20);
set_mechanical_pan_relative(pCameraControl, -20);
set_mechanical_pan_relative(pCameraControl, -40);
Sleep(3000);
//*/
//*
printf(" Testing mechanical tilt ...\n");
set_mechanical_tilt_relative(pCameraControl, 20);
set_mechanical_tilt_relative(pCameraControl, 10);
set_mechanical_tilt_relative(pCameraControl, -10);
set_mechanical_tilt_relative(pCameraControl, -20);
Sleep(3000);
//*/
//*
printf(" Testing digital pan/tilt/zoom ...\n");
set_digital_zoom_absolute(pCameraControl, 100); // Zoom to 200%
Sleep(1000);
set_digital_pan_absolute(pCameraControl, 40);
Sleep(1000);
set_digital_pan_absolute(pCameraControl, 80);
Sleep(1000);
set_digital_zoom_absolute(pCameraControl, 200); // Zoom to 400%
Sleep(1000);
set_digital_tilt_absolute(pCameraControl, 40);
Sleep(1000);
set_digital_tilt_absolute(pCameraControl, 60);
Sleep(1000);
reset_digital_pan_tilt(pCameraControl);
Sleep(1000);
reset_digital_zoom(pCameraControl);
Sleep(3000);
//*/
//*
printf(" Testing digital zoom ...\n");
for(int i = zoomInfo.min; i <= zoomInfo.max; i += zoomInfo.step)
{
set_digital_zoom_absolute(pCameraControl, i);
Sleep(10);
}
Sleep(1000);
for(int i = zoomInfo.max; i >= zoomInfo.min; i -= zoomInfo.step)
{
set_digital_zoom_absolute(pCameraControl, i);
Sleep(10);
}
//*/
return S_OK;
}
/*
* Do something with the filter. In this sample we just test the pan/tilt properties.
*/
void process_filter(IBaseFilter *pBaseFilter)
{
test_pan_tilt(pBaseFilter);
}
/*
* Enumerate all video devices
*
* See also:
*
* Using the System Device Enumerator:
* http://msdn2.microsoft.com/en-us/library/ms787871.aspx
*/
int enum_devices()
{
HRESULT hr;
printf("Enumerating video input devices ...\n");
// Create the System Device Enumerator.
ICreateDevEnum *pSysDevEnum = NULL;
hr = CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER,
IID_ICreateDevEnum, (void **)&pSysDevEnum);
if(FAILED(hr))
{
fprintf(stderr, "ERROR: Unable to create system device enumerator.\n");
return hr;
}
// Obtain a class enumerator for the video input device category.
IEnumMoniker *pEnumCat = NULL;
hr = pSysDevEnum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &pEnumCat, 0);
if(hr == S_OK)
{
// Enumerate the monikers.
IMoniker *pMoniker = NULL;
ULONG cFetched;
while(pEnumCat->Next(1, &pMoniker, &cFetched) == S_OK)
{
IPropertyBag *pPropBag;
hr = pMoniker->BindToStorage(0, 0, IID_IPropertyBag,
(void **)&pPropBag);
if(SUCCEEDED(hr))
{
// To retrieve the filter's friendly name, do the following:
VARIANT varName;
VariantInit(&varName);
hr = pPropBag->Read(L"FriendlyName", &varName, 0);
if (SUCCEEDED(hr))
{
// Display the name in your UI somehow.
wprintf(L" Found device: %s\n", varName.bstrVal);
}
VariantClear(&varName);
// To create an instance of the filter, do the following:
IBaseFilter *pFilter;
hr = pMoniker->BindToObject(NULL, NULL, IID_IBaseFilter,
(void**)&pFilter);
if(SUCCEEDED(hr))
{
process_filter(pFilter);
pFilter->Release(); // Release the filter once we are done with it
}
pPropBag->Release();
}
pMoniker->Release();
}
pEnumCat->Release();
}
pSysDevEnum->Release();
return 0;
}
int wmain(int argc, wchar_t* argv[])
{
int result;
CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
result = enum_devices();
CoUninitialize();
return result;
}
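To build this sample, link against strmiids.lib (which provides the DirectShow class and interface IDs used above) and ole32.lib (for the COM calls).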
I'm working on code to capture the desktop using Desktop Duplication and encode it to h264 using the Intel hardware MFT. The encoder only accepts NV12 as input. I have a DXGI_FORMAT_B8G8R8A8_UNORM to NV12 converter (https://github.com/NVIDIA/video-sdk-samples/blob/master/nvEncDXGIOutputDuplicationSample/Preproc.cpp) that works fine and is based on the DirectX VideoProcessor.
The problem is that the VideoProcessor on certain Intel graphics hardware supports conversion from DXGI_FORMAT_B8G8R8A8_UNORM to YUY2 but not to NV12; I have confirmed this by enumerating the supported formats through GetVideoProcessorOutputFormats. The VideoProcessorBlt succeeds without any errors, yet the frames in the output video are slightly pixelated; I can notice it if I look closely.
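For reference, a hedged sketch of that kind of support check (pEnum stands for the ID3D11VideoProcessorEnumerator created for the conversion; the name is assumed):
// Ask the enumerator whether NV12 is supported as an *output* format.
UINT flags = 0;
HRESULT hr = pEnum->CheckVideoProcessorFormat(DXGI_FORMAT_NV12, &flags);
BOOL nv12OutputSupported = SUCCEEDED(hr) && (flags & D3D11_VIDEO_PROCESSOR_FORMAT_SUPPORT_OUTPUT);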
I guess the VideoProcessor has simply fallen back to the next supported output format (YUY2), and I'm unknowingly feeding it to the encoder, which thinks the input is in NV12 as configured. The frames don't fail or get badly corrupted because NV12 and YUY2 differ only in details like byte order and subsampling. Also, I don't have the pixelation problem on hardware that supports NV12 conversion.
So I decided to do the color conversion with pixel shaders, based on this code (https://github.com/bavulapati/DXGICaptureDXColorSpaceConversionIntelEncode/blob/master/DXGICaptureDXColorSpaceConversionIntelEncode/DuplicationManager.cpp). I'm able to make the pixel shaders work, and I have also uploaded my code here (https://codeshare.io/5PJjxP) for reference (simplified as much as possible).
Now I'm left with two separate channels, luma and chroma, each in its own ID3D11Texture2D, and I'm really confused about how to efficiently pack the two channels into one ID3D11Texture2D so that I can feed it to the encoder. Is there a way to efficiently pack the Y and UV channels into a single ID3D11Texture2D on the GPU? I'm really tired of CPU-based approaches; they are costly and don't offer the best possible frame rates. In fact, I'm reluctant to even copy the textures to the CPU. I'm looking for a way to do it on the GPU without any back-and-forth copies between CPU and GPU.
I have been researching this for quite some time without any progress; any help would be appreciated.
/**
* This method is incomplete. It's just a template of what I want to achieve.
*/
HRESULT CreateNV12TextureFromLumaAndChromaSurface(ID3D11Texture2D** pOutputTexture)
{
HRESULT hr = S_OK;
try
{
//Copying from GPU to CPU. Bad :(
m_pD3D11DeviceContext->CopyResource(m_CPUAccessibleLuminanceSurf, m_LuminanceSurf);
D3D11_MAPPED_SUBRESOURCE resource;
UINT subresource = D3D11CalcSubresource(0, 0, 0);
hr = m_pD3D11DeviceContext->Map(m_CPUAccessibleLuminanceSurf, subresource, D3D11_MAP_READ, 0, &resource); // note: assign to the outer hr instead of shadowing it
BYTE* sptr = reinterpret_cast<BYTE*>(resource.pData);
BYTE* dptrY = nullptr; // point to the address of Y channel in output surface
//Store Image Pitch
int m_ImagePitch = resource.RowPitch;
int height = GetImageHeight();
int width = GetImageWidth();
for (int i = 0; i < height; i++)
{
memcpy_s(dptrY, m_ImagePitch, sptr, m_ImagePitch);
sptr += m_ImagePitch;
dptrY += m_ImagePitch;
}
m_pD3D11DeviceContext->Unmap(m_CPUAccessibleLuminanceSurf, subresource);
//Copying from GPU to CPU. Bad :(
m_pD3D11DeviceContext->CopyResource(m_CPUAccessibleChrominanceSurf, m_ChrominanceSurf);
hr = m_pD3D11DeviceContext->Map(m_CPUAccessibleChrominanceSurf, subresource, D3D11_MAP_READ, 0, &resource);
sptr = reinterpret_cast<BYTE*>(resource.pData);
BYTE* dptrUV = nullptr; // point to the address of UV channel in output surface
m_ImagePitch = resource.RowPitch;
height /= 2;
width /= 2;
for (int i = 0; i < height; i++)
{
memcpy_s(dptrUV, m_ImagePitch, sptr, m_ImagePitch);
sptr += m_ImagePitch;
dptrUV += m_ImagePitch;
}
m_pD3D11DeviceContext->Unmap(m_CPUAccessibleChrominanceSurf, subresource);
}
catch(HRESULT){}
return hr;
}
Draw NV12:
//
// Draw frame for NV12 texture
//
HRESULT DrawNV12Frame(ID3D11Texture2D* inputTexture)
{
HRESULT hr;
// If window was resized, resize swapchain
if (!m_bInitialized)
{
HRESULT Ret = InitializeNV12Surfaces(inputTexture);
if (!SUCCEEDED(Ret))
{
return Ret;
}
m_bInitialized = true;
}
m_pD3D11DeviceContext->CopyResource(m_ShaderResourceSurf, inputTexture);
D3D11_TEXTURE2D_DESC FrameDesc;
m_ShaderResourceSurf->GetDesc(&FrameDesc);
D3D11_SHADER_RESOURCE_VIEW_DESC ShaderDesc;
ShaderDesc.Format = FrameDesc.Format;
ShaderDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
ShaderDesc.Texture2D.MostDetailedMip = FrameDesc.MipLevels - 1;
ShaderDesc.Texture2D.MipLevels = FrameDesc.MipLevels;
// Create new shader resource view
ID3D11ShaderResourceView* ShaderResource = nullptr;
hr = m_pD3D11Device->CreateShaderResourceView(m_ShaderResourceSurf, &ShaderDesc, &ShaderResource);
IF_FAILED_THROW(hr);
m_pD3D11DeviceContext->PSSetShaderResources(0, 1, &ShaderResource);
// Set resources
m_pD3D11DeviceContext->OMSetRenderTargets(1, &m_pLumaRT, nullptr);
m_pD3D11DeviceContext->PSSetShader(m_pPixelShaderLuma, nullptr, 0);
m_pD3D11DeviceContext->RSSetViewports(1, &m_VPLuminance);
// Draw textured quad onto render target
m_pD3D11DeviceContext->Draw(NUMVERTICES, 0);
m_pD3D11DeviceContext->OMSetRenderTargets(1, &m_pChromaRT, nullptr);
m_pD3D11DeviceContext->PSSetShader(m_pPixelShaderChroma, nullptr, 0);
m_pD3D11DeviceContext->RSSetViewports(1, &m_VPChrominance);
// Draw textured quad onto render target
m_pD3D11DeviceContext->Draw(NUMVERTICES, 0);
// Release shader resource
ShaderResource->Release();
ShaderResource = nullptr;
return S_OK;
}
Init shaders:
void SetViewPort(D3D11_VIEWPORT* VP, UINT Width, UINT Height)
{
VP->Width = static_cast<FLOAT>(Width);
VP->Height = static_cast<FLOAT>(Height);
VP->MinDepth = 0.0f;
VP->MaxDepth = 1.0f;
VP->TopLeftX = 0;
VP->TopLeftY = 0;
}
HRESULT MakeRTV(ID3D11RenderTargetView** pRTV, ID3D11Texture2D* pSurf)
{
if (*pRTV)
{
(*pRTV)->Release();
*pRTV = nullptr;
}
// Create a render target view
HRESULT hr = m_pD3D11Device->CreateRenderTargetView(pSurf, nullptr, pRTV);
IF_FAILED_THROW(hr);
return S_OK;
}
HRESULT InitializeNV12Surfaces(ID3D11Texture2D* inputTexture)
{
ReleaseSurfaces();
D3D11_TEXTURE2D_DESC lOutputDuplDesc;
inputTexture->GetDesc(&lOutputDuplDesc);
// Create shared texture for all duplication threads to draw into
D3D11_TEXTURE2D_DESC DeskTexD;
RtlZeroMemory(&DeskTexD, sizeof(D3D11_TEXTURE2D_DESC));
DeskTexD.Width = lOutputDuplDesc.Width;
DeskTexD.Height = lOutputDuplDesc.Height;
DeskTexD.MipLevels = 1;
DeskTexD.ArraySize = 1;
DeskTexD.Format = lOutputDuplDesc.Format;
DeskTexD.SampleDesc.Count = 1;
DeskTexD.Usage = D3D11_USAGE_DEFAULT;
DeskTexD.BindFlags = D3D11_BIND_SHADER_RESOURCE;
HRESULT hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, nullptr, &m_ShaderResourceSurf);
IF_FAILED_THROW(hr);
DeskTexD.Format = DXGI_FORMAT_R8_UNORM;
DeskTexD.BindFlags = D3D11_BIND_RENDER_TARGET;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, nullptr, &m_LuminanceSurf);
IF_FAILED_THROW(hr);
DeskTexD.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
DeskTexD.Usage = D3D11_USAGE_STAGING;
DeskTexD.BindFlags = 0;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, NULL, &m_CPUAccessibleLuminanceSurf);
IF_FAILED_THROW(hr);
SetViewPort(&m_VPLuminance, DeskTexD.Width, DeskTexD.Height);
HRESULT Ret = MakeRTV(&m_pLumaRT, m_LuminanceSurf);
if (!SUCCEEDED(Ret))
return Ret;
DeskTexD.Width = lOutputDuplDesc.Width / 2;
DeskTexD.Height = lOutputDuplDesc.Height / 2;
DeskTexD.Format = DXGI_FORMAT_R8G8_UNORM;
DeskTexD.Usage = D3D11_USAGE_DEFAULT;
DeskTexD.CPUAccessFlags = 0;
DeskTexD.BindFlags = D3D11_BIND_RENDER_TARGET;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, nullptr, &m_ChrominanceSurf);
IF_FAILED_THROW(hr);
DeskTexD.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
DeskTexD.Usage = D3D11_USAGE_STAGING;
DeskTexD.BindFlags = 0;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, NULL, &m_CPUAccessibleChrominanceSurf);
IF_FAILED_THROW(hr);
SetViewPort(&m_VPChrominance, DeskTexD.Width, DeskTexD.Height);
return MakeRTV(&m_pChromaRT, m_ChrominanceSurf);
}
HRESULT InitVertexShader(ID3D11VertexShader** ppID3D11VertexShader)
{
HRESULT hr = S_OK;
UINT Size = ARRAYSIZE(g_VS);
try
{
IF_FAILED_THROW(m_pD3D11Device->CreateVertexShader(g_VS, Size, NULL, ppID3D11VertexShader));
m_pD3D11DeviceContext->VSSetShader(m_pVertexShader, nullptr, 0);
// Vertices for drawing whole texture
VERTEX Vertices[NUMVERTICES] =
{
{ XMFLOAT3(-1.0f, -1.0f, 0), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 0), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, 0), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(1.0f, -1.0f, 0), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 0), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, 0), XMFLOAT2(1.0f, 0.0f) },
};
UINT Stride = sizeof(VERTEX);
UINT Offset = 0;
D3D11_BUFFER_DESC BufferDesc;
RtlZeroMemory(&BufferDesc, sizeof(BufferDesc));
BufferDesc.Usage = D3D11_USAGE_DEFAULT;
BufferDesc.ByteWidth = sizeof(VERTEX) * NUMVERTICES;
BufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
BufferDesc.CPUAccessFlags = 0;
D3D11_SUBRESOURCE_DATA InitData;
RtlZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = Vertices;
// Create vertex buffer
IF_FAILED_THROW(m_pD3D11Device->CreateBuffer(&BufferDesc, &InitData, &m_VertexBuffer));
m_pD3D11DeviceContext->IASetVertexBuffers(0, 1, &m_VertexBuffer, &Stride, &Offset);
m_pD3D11DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
D3D11_INPUT_ELEMENT_DESC Layout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
UINT NumElements = ARRAYSIZE(Layout);
hr = m_pD3D11Device->CreateInputLayout(Layout, NumElements, g_VS, Size, &m_pVertexLayout);
m_pD3D11DeviceContext->IASetInputLayout(m_pVertexLayout);
}
catch (HRESULT) {}
return hr;
}
HRESULT InitPixelShaders()
{
HRESULT hr = S_OK;
// Refer https://codeshare.io/5PJjxP for g_PS_Y & g_PS_UV blobs
try
{
UINT Size = ARRAYSIZE(g_PS_Y);
hr = m_pD3D11Device->CreatePixelShader(g_PS_Y, Size, nullptr, &m_pPixelShaderLuma);
IF_FAILED_THROW(hr);
Size = ARRAYSIZE(g_PS_UV);
hr = m_pD3D11Device->CreatePixelShader(g_PS_UV, Size, nullptr, &m_pPixelShaderChroma);
IF_FAILED_THROW(hr);
}
catch (HRESULT) {}
return hr;
}
I am experimenting with this RGBA to NV12 conversion on the GPU only, using DirectX 11.
This is a good challenge. I'm not familiar with DirectX 11, so this is my first experiment.
Check this project for updates: D3D11ShaderNV12
In my current implementation (which may not be final), here is what I do:
Step 1: use a DXGI_FORMAT_B8G8R8A8_UNORM as input texture
Step 2: make a 1st pass shader to get 3 textures (Y:Luma, U:ChromaCb and V:ChromaCr): see YCbCrPS2.hlsl
Step 3: Y is DXGI_FORMAT_R8_UNORM, and is ready for final NV12 texture
Step 4: UV needs to be downsampled in a 2nd pass shader: see ScreenPS2.hlsl (using linear filtering)
Step 5: a third pass shader to sample Y texture
Step 6: a fourth pass shader to sample the UV texture using a shift texture (I think another technique could be used)
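For reference, the first pass (Step 2) typically applies a BT.601 or BT.709 matrix; a hedged C++ rendition of the per-pixel math such a pass computes (the actual YCbCrPS2.hlsl may use different constants or ranges):
// Studio-range BT.601 RGB -> YCbCr, with r, g, b normalized to [0, 1].
struct YCbCr { float y, cb, cr; };
YCbCr rgb_to_ycbcr_bt601(float r, float g, float b)
{
    YCbCr o;
    o.y  =  0.257f * r + 0.504f * g + 0.098f * b +  16.0f / 255.0f; // luma
    o.cb = -0.148f * r - 0.291f * g + 0.439f * b + 128.0f / 255.0f; // blue-difference chroma
    o.cr =  0.439f * r - 0.368f * g - 0.071f * b + 128.0f / 255.0f; // red-difference chroma
    return o;
}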
My final texture is not DXGI_FORMAT_NV12, but a similar DXGI_FORMAT_R8_UNORM texture. My computer runs Windows 7, so DXGI_FORMAT_NV12 is not handled; I will try later on another computer.
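On systems where the driver does support NV12 textures, the R8 work-around should be unnecessary: Direct3D 11.1-class drivers allow one render-target view per plane of a single NV12 texture, so the Y and UV passes can write straight into the final texture. A hedged sketch, not from the project above (pDevice, width and height are assumed, and support should be verified with CheckFormatSupport first):
// Create one DXGI_FORMAT_NV12 texture that both passes render into.
D3D11_TEXTURE2D_DESC desc = {};
desc.Width = width; // assumed even
desc.Height = height; // assumed even
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_NV12;
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
ID3D11Texture2D *pNV12 = nullptr;
HRESULT hr = pDevice->CreateTexture2D(&desc, nullptr, &pNV12);
// An R8_UNORM view addresses plane 0 (Y); an R8G8_UNORM view addresses
// plane 1 (UV). Draw the luma pass into the first and the chroma pass
// (with a half-size viewport) into the second.
D3D11_RENDER_TARGET_VIEW_DESC rtv = {};
rtv.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
rtv.Format = DXGI_FORMAT_R8_UNORM;
ID3D11RenderTargetView *pYRTV = nullptr;
hr = pDevice->CreateRenderTargetView(pNV12, &rtv, &pYRTV);
rtv.Format = DXGI_FORMAT_R8G8_UNORM;
ID3D11RenderTargetView *pUVRTV = nullptr;
hr = pDevice->CreateRenderTargetView(pNV12, &rtv, &pUVRTV);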
I am using the librealsense2 library, following this example: https://github.com/IntelRealSense/librealsense/blob/master/examples/align/rs-align.cpp
After mapping the depth image to the color image with the realsense2 library, I want to display the image with OpenCV's Mat (imshow) function.
So I coded this:
#include "librealsense2/rs.hpp"
#include <opencv2/opencv.hpp>
#include <sstream>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <cstring>
using namespace std;
using namespace cv;
void remove_background(rs2::video_frame& other, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist);
float get_depth_scale(rs2::device dev);
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams);
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev);
int main(int args, char * argv[]) try
{
// Create and initialize GUI related objects
rs2::colorizer c;
rs2::config cfg;
rs2::pipeline pipe;
const int width = 1280;
const int height = 720;
c.set_option(RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED, 1.f);
c.set_option(RS2_OPTION_COLOR_SCHEME, 2.f); // White to Black
cfg.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_BGR8, 30);
cfg.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, 30);
rs2::pipeline_profile profile = pipe.start(cfg);
float depth_scale = get_depth_scale(profile.get_device());
rs2_stream align_to = find_stream_to_align(profile.get_streams());
rs2::align align(align_to);
float depth_clipping_distance = 3.f;
while (true)
{
rs2::frameset frameset = pipe.wait_for_frames();
if (profile_changed(pipe.get_active_profile().get_streams(), profile.get_streams()))
{
profile = pipe.get_active_profile();
align_to = find_stream_to_align(profile.get_streams());
align = rs2::align(align_to);
depth_scale = get_depth_scale(profile.get_device());
}
auto processed = align.process(frameset);
rs2::video_frame other_frame = processed.first(align_to);
rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());
if (!aligned_depth_frame || !other_frame)
{
continue;
}
remove_background(other_frame, aligned_depth_frame, depth_scale, depth_clipping_distance);
Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);
namedWindow("other window", WINDOW_AUTOSIZE);
namedWindow("depth window", WINDOW_AUTOSIZE);
imshow("other window", other_frameaM);
imshow("depth window", aligned_depthM);
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception & e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
float get_depth_scale(rs2::device dev)
{
// Go over the device's sensors
for (rs2::sensor& sensor : dev.query_sensors())
{
// Check if the sensor is a depth sensor
if (rs2::depth_sensor dpt = sensor.as<rs2::depth_sensor>())
{
return dpt.get_depth_scale();
}
}
throw std::runtime_error("Device does not have a depth sensor");
}
void remove_background(rs2::video_frame& other_frame, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist)
{
const uint16_t* p_depth_frame = reinterpret_cast<const uint16_t*>(depth_frame.get_data());
uint8_t* p_other_frame = reinterpret_cast<uint8_t*>(const_cast<void*>(other_frame.get_data()));
int width = other_frame.get_width();
int height = other_frame.get_height();
int other_bpp = other_frame.get_bytes_per_pixel();
#pragma omp parallel for schedule(dynamic) //Using OpenMP to try to parallelise the loop
for (int y = 0; y < height; y++)
{
auto depth_pixel_index = y * width;
for (int x = 0; x < width; x++, ++depth_pixel_index)
{
// Get the depth value of the current pixel
auto pixels_distance = depth_scale * p_depth_frame[depth_pixel_index];
// Check if the depth value is invalid (<= 0) or greater than the threshold
if (pixels_distance <= 0.f || pixels_distance > clipping_dist)
{
// Calculate the offset in other frame's buffer to current pixel
auto offset = depth_pixel_index * other_bpp;
// Set pixel to "background" color (0x999999)
std::memset(&p_other_frame[offset], 0x99, other_bpp);
}
}
}
}
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams)
{
//Given a vector of streams, we try to find a depth stream and another stream to align depth with.
//We prioritize color streams to make the view look better.
//If color is not available, we take another stream (other than depth)
rs2_stream align_to = RS2_STREAM_ANY;
bool depth_stream_found = false;
bool color_stream_found = false;
for (rs2::stream_profile sp : streams)
{
rs2_stream profile_stream = sp.stream_type();
if (profile_stream != RS2_STREAM_DEPTH)
{
if (!color_stream_found) //Prefer color
align_to = profile_stream;
if (profile_stream == RS2_STREAM_COLOR)
{
color_stream_found = true;
}
}
else
{
depth_stream_found = true;
}
}
if (!depth_stream_found)
throw std::runtime_error("No Depth stream available");
if (align_to == RS2_STREAM_ANY)
throw std::runtime_error("No stream found to align with Depth");
return align_to;
}
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev)
{
for (auto&& sp : prev)
{
//If previous profile is in current (maybe just added another)
auto itr = std::find_if(std::begin(current), std::end(current), [&sp](const rs2::stream_profile& current_sp) { return sp.unique_id() == current_sp.unique_id(); });
if (itr == std::end(current)) //If the previous stream wasn't found in current
{
return true;
}
}
return false;
}
There are only gray screens and nothing happens.
Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);
I guessed these lines were not the problem, because the depth image and RGB image opened fine in CV_8UC3 format.
However, when I try to align the frames and then fetch them into OpenCV, the image appears only as a gray screen.
auto frames = pipe.wait_for_frames(); // Wait for next set of frames from the camera
rs2::video_frame color = frames.get_color_frame();
rs2::depth_frame depth = color_map(frames.get_depth_frame());
if (!color)
color = frames.get_infrared_frame();
Mat colorM(Size(width, height), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);
Mat depthM(Size(width, height), CV_8UC3, (void*)depth.get_data(), Mat::AUTO_STEP);
This is the part of the code that outputs the color image and the depth image, and it works well.
So I suspect these lines:
rs2::video_frame other_frame = processed.first(align_to);
rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());
Whatever the processing, I thought it would work because the data still comes back as frames. I think I have made a big mistake somewhere in this code.
Which part is wrong?
There are several ways to store an image in memory, and there is no guarantee that you can just pass the buffer and it'll all work; try copying pixel by pixel.
You should know that OpenCV uses the BGR interleaved image format, while RealSense might use another.
1) Get aligned frames
frameset data = pipe.wait_for_frames();
frameset aligned_set = align_to.process(data);
auto color_mat = frame_to_mat(aligned_set.get_color_frame());
auto depth_mat = frame_to_mat(aligned_set.get_depth_frame());
2) frame_to_mat helper function
cv::Mat frame_to_mat(const rs2::frame& f)
{
using namespace cv;
using namespace rs2;
auto vf = f.as<video_frame>();
const int w = vf.get_width();
const int h = vf.get_height();
if (f.get_profile().format() == RS2_FORMAT_BGR8)
{
return Mat(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
}
else if (f.get_profile().format() == RS2_FORMAT_RGB8)
{
auto r = Mat(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
cvtColor(r, r, CV_RGB2BGR);
return r;
}
else if (f.get_profile().format() == RS2_FORMAT_Z16)
{
return Mat(Size(w, h), CV_16UC1, (void*)f.get_data(), Mat::AUTO_STEP);
}
else if (f.get_profile().format() == RS2_FORMAT_Y8)
{
return Mat(Size(w, h), CV_8UC1, (void*)f.get_data(), Mat::AUTO_STEP);
}
throw std::runtime_error("Frame format is not supported yet!");
}
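One more note, since the question displays the depth stream directly: the raw Z16 depth frame maps to CV_16UC1, which imshow renders as a mostly dark or gray-looking image unless you scale it down to 8 bits first. A hedged usage sketch (the 0.03 factor assumes a 1 mm depth unit):
// Convert 16-bit depth to a viewable 8-bit image; 0.03 maps roughly
// 0..8.5 m to 0..255 when one depth unit is 1 mm.
cv::Mat depth16 = frame_to_mat(aligned_set.get_depth_frame()); // CV_16UC1
cv::Mat depth8;
depth16.convertTo(depth8, CV_8UC1, 0.03);
cv::imshow("depth window", depth8);
cv::waitKey(1); // imshow windows only repaint when events are pumped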
How can I flip the SharpDX DataBox without converting it to a bitmap?
I'm making a screen recorder using SharpDX and Media Foundation. Below is the code showing how I get the DataBox.
mapSource = device.ImmediateContext.MapSubresource(screenTexture, 0,SharpDX.Direct3D11.MapMode.Read, SharpDX.Direct3D11.MapFlags.None);
But when I pass the mapSource to MediaFoundation.NET, the video comes out vertically flipped.
IMFSample sample = null;
IMFMediaBuffer buffer = null;
IntPtr data = new IntPtr();
int bufferMaxLength;
int bufferCurrentLength;
int hr = (int)MFExtern.MFCreateMemoryBuffer(frameSizeBytes, out buffer);
if (Succeeded(hr)) hr = (int)buffer.Lock(out data, out bufferMaxLength, out bufferCurrentLength);
if (Succeeded(hr))
{
hr = (int)MFExtern.MFCopyImage(data, videoWidth * BYTES_PER_PIXEL, mapSource.DataPointer, videoWidth * BYTES_PER_PIXEL, videoWidth * BYTES_PER_PIXEL, videoHeight);
}
if (Succeeded(hr)) hr = (int)buffer.Unlock();
if (Succeeded(hr)) hr = (int)buffer.SetCurrentLength(frameSizeBytes);
if (Succeeded(hr)) hr = (int)MFExtern.MFCreateSample(out sample);
if (Succeeded(hr)) hr = (int)sample.AddBuffer(buffer);
if (Succeeded(hr)) hr = (int)sample.SetSampleTime(frame.prevRecordingDuration.Ticks);//(TICKS_PER_SECOND * frames / VIDEO_FPS);
if (Succeeded(hr)) hr = (int)sample.SetSampleDuration((frame.recordDuration-frame.prevRecordingDuration).Ticks);
if (Succeeded(hr)) hr = (int)sinkWriter.WriteSample(streamIndex, sample);
if (Succeeded(hr)) frames++;
COMBase.SafeRelease(sample);
COMBase.SafeRelease(buffer);
There is a mistake in your code with MFCopyImage. According to the MFCopyImage documentation, the lDestStride and lSrcStride parameters give the width, in bytes, of the memory used to store one line of pixels. Your computation videoWidth * BYTES_PER_PIXEL is not correct, because for Windows RGB formats the stride can be wider than videoWidth * BYTES_PER_PIXEL. You must compute the destination stride with the MFGetStrideForBitmapInfoHeader function; the source stride you can get from your image source. I do not know your code, but in my project I used:
D3D11_MAPPED_SUBRESOURCE resource;
UINT subresource = D3D11CalcSubresource(0, 0, 0);
ctx->Map(mDestImage, subresource, D3D11_MAP_READ_WRITE, 0, &resource);
LOG_INVOKE_MF_FUNCTION(MFCopyImage,
aPtrData, // destination buffer
mStride, // destination stride, from MFGetStrideForBitmapInfoHeader
(BYTE*)resource.pData, // source buffer
resource.RowPitch, // source stride, as reported by Map
mWidthInBytes, // width of one row in bytes (name assumed; the original snippet was cut off here)
mHeight); // number of rows (name assumed)
Regards.
P.S. The destination stride mStride can be negative; that means the image needs to be written from the last line to the first. This can be done with the following adjustment of the destination pointer: aPtrData += (mHeight - 1)*mStride;
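Putting it together, a hedged sketch of the flip-during-copy pattern in the same C++ style as the snippet above (buffer and size names are illustrative, not from the question's code):
// With a negative destination stride, MFCopyImage steps the destination
// pointer upward, so start it at the beginning of the last row.
LONG stride = 0;
MFGetStrideForBitmapInfoHeader(D3DFMT_X8R8G8B8, videoWidth, &stride); // 32-bit RGB
BYTE* dest = pLockedBuffer;
if (stride < 0)
    dest += (videoHeight - 1) * (-stride); // jump to the last row
HRESULT hr = MFCopyImage(dest, stride, pSource, sourceRowPitch,
                         videoWidth * BYTES_PER_PIXEL, videoHeight);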
// Draw a grid background.
int width = static_cast<int>(rtSize.width);
int height = static_cast<int>(rtSize.height);
for (int x = 0; x < width; x += 10)
{
m_pRenderTarget->DrawLine(
D2D1::Point2F(static_cast<FLOAT>(x), 0.0f),
D2D1::Point2F(static_cast<FLOAT>(x), rtSize.height),
m_pLightSlateGrayBrush,
0.5f
);
}
This is the sample from the documentation. I've included "d2d1.h"; I just don't know how to create the "m_pRenderTarget". I'm writing a Kinect project and I want to draw a line on the image. I'm really new, please help me.
Have you seen this page?
Create an ID2D1HwndRenderTarget
The quick start tutorial has detailed steps on how to use Direct2D.
You can also download the Windows SDK; the samples contain a Direct2D demo with the full steps for creating a Direct2D render target.
I have written a program that draws a rectangle; with a little change, it can draw a line. Just for your reference:
#include <windows.h>
#include <D2D1.h>
#define SAFE_RELEASE(P) if(P){P->Release() ; P = NULL ;}
ID2D1Factory* g_pD2DFactory = NULL; // Direct2D factory
ID2D1HwndRenderTarget* g_pRenderTarget = NULL; // Render target
ID2D1SolidColorBrush* g_pBlackBrush = NULL; // A black brush, reflect the line color
VOID CreateD2DResource(HWND hWnd)
{
if (!g_pRenderTarget)
{
HRESULT hr ;
hr = D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &g_pD2DFactory) ;
if (FAILED(hr))
{
MessageBox(hWnd, "Create D2D factory failed!", "Error", 0) ;
return ;
}
// Obtain the size of the drawing area
RECT rc ;
GetClientRect(hWnd, &rc) ;
// Create a Direct2D render target
hr = g_pD2DFactory->CreateHwndRenderTarget(
D2D1::RenderTargetProperties(),
D2D1::HwndRenderTargetProperties(
hWnd,
D2D1::SizeU(rc.right - rc.left,rc.bottom - rc.top)
),
&g_pRenderTarget
) ;
if (FAILED(hr))
{
MessageBox(hWnd, "Create render target failed!", "Error", 0) ;
return ;
}
// Create a brush
hr = g_pRenderTarget->CreateSolidColorBrush(
D2D1::ColorF(D2D1::ColorF::Black),
&g_pBlackBrush
) ;
if (FAILED(hr))
{
MessageBox(hWnd, "Create brush failed!", "Error", 0) ;
return ;
}
}
}
VOID DrawLine(HWND hwnd)
{
CreateD2DResource(hwnd) ;
g_pRenderTarget->BeginDraw() ;
// Clear background color to White
g_pRenderTarget->Clear(D2D1::ColorF(D2D1::ColorF::White));
// Draw a line
g_pRenderTarget->DrawLine(
D2D1::Point2F(100.0f, 100.0f),
D2D1::Point2F(500.0f, 500.0f),
g_pBlackBrush
);
HRESULT hr = g_pRenderTarget->EndDraw() ;
if (FAILED(hr))
{
MessageBox(NULL, "Draw failed!", "Error", 0) ;
return ;
}
}
VOID Cleanup()
{
SAFE_RELEASE(g_pRenderTarget) ;
SAFE_RELEASE(g_pBlackBrush) ;
SAFE_RELEASE(g_pD2DFactory) ;
}
LRESULT CALLBACK WndProc(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
switch (message)
{
case WM_PAINT:
DrawLine(hwnd) ;
return 0 ;
case WM_KEYDOWN:
{
switch( wParam )
{
case VK_ESCAPE:
SendMessage( hwnd, WM_CLOSE, 0, 0 );
break ;
default:
break ;
}
}
break ;
case WM_DESTROY:
Cleanup();
PostQuitMessage( 0 );
return 0;
}
return DefWindowProc (hwnd, message, wParam, lParam) ;
}
int WINAPI WinMain( HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR szCmdLine, int iCmdShow )
{
WNDCLASSEX winClass ;
winClass.lpszClassName = "Direct2D";
winClass.cbSize = sizeof(WNDCLASSEX);
winClass.style = CS_HREDRAW | CS_VREDRAW;
winClass.lpfnWndProc = WndProc;
winClass.hInstance = hInstance;
winClass.hIcon = NULL ;
winClass.hIconSm = NULL ;
winClass.hCursor = LoadCursor(NULL, IDC_ARROW);
winClass.hbrBackground = NULL ;
winClass.lpszMenuName = NULL;
winClass.cbClsExtra = 0;
winClass.cbWndExtra = 0;
if (!RegisterClassEx (&winClass))
{
MessageBox ( NULL, TEXT( "This program requires Windows NT!" ), "error", MB_ICONERROR) ;
return 0 ;
}
HWND hwnd = CreateWindowEx(NULL,
"Direct2D", // window class name
"Draw Rectangle", // window caption
WS_OVERLAPPEDWINDOW, // window style
CW_USEDEFAULT, // initial x position
CW_USEDEFAULT, // initial y position
600, // initial x size
600, // initial y size
NULL, // parent window handle
NULL, // window menu handle
hInstance, // program instance handle
NULL) ; // creation parameters
ShowWindow (hwnd, iCmdShow) ;
UpdateWindow (hwnd) ;
MSG msg ;
ZeroMemory(&msg, sizeof(msg)) ;
while (GetMessage (&msg, NULL, 0, 0))
{
TranslateMessage (&msg) ;
DispatchMessage (&msg) ;
}
return static_cast<int>(msg.wParam) ;
}
I have been writing my own library using DirectX and have hit an odd issue. Whilst trying to render an animated sprite, I simply see a big black square.
I have stepped through the code obsessively and have concluded that it must be something in the loading of the actual sprites, because everything that I can see in my code is fine. Obviously, I cannot step into functions such as BltFast, so I cannot tell whether my sprite surfaces are being blitted onto the back buffer successfully.
Here are my load and render functions for the sprite:
SPRITE::LOAD
/**
* loads a bitmap file and copies it to a directdraw surface
*
* @param pID identifier to assign to the sprite
* @param pFileName name of the bitmap file to load into memory
*/
void Sprite::Load (const char *pID, const char *pFileName)
{
// initialises the member variables with the new image id and file name
mID = pID;
mFileName = pFileName;
// creates the necessary variables
HBITMAP tHBM;
BITMAP tBM;
DDSURFACEDESC2 tDDSD;
IDirectDrawSurface7 *tDDS;
// stores bitmap image into HBITMAP handler
tHBM = static_cast<HBITMAP> (LoadImage (NULL, pFileName, IMAGE_BITMAP, 0, 0, LR_LOADFROMFILE | LR_CREATEDIBSECTION));
GetObject (tHBM, sizeof (tBM), &tBM);
// create surface for the HBITMAP to be copied onto
ZeroMemory (&tDDSD, sizeof (tDDSD));
tDDSD.dwSize = sizeof (tDDSD);
tDDSD.dwFlags = DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH;
tDDSD.ddsCaps.dwCaps = DDSCAPS_OFFSCREENPLAIN;
tDDSD.dwWidth = tBM.bmWidth;
tDDSD.dwHeight = tBM.bmHeight;
DirectDraw::GetInstance ()->DirectDrawObject()->CreateSurface (&tDDSD, &tDDS, NULL);
// copying bitmap image onto surface
CopyBitmap(tDDS, tHBM, 0, 0, 0, 0);
// deletes bitmap image now that it has been used
DeleteObject(tHBM);
// stores the new width and height of the image
mSpriteWidth = tBM.bmWidth;
mSpriteHeight = tBM.bmHeight;
// sets the address of the bitmap surface to this temporary surface with the new bitmap image
mBitmapSurface = tDDS;
}
SPRITE::RENDER
/**
* renders the sprites surface to the back buffer
*
* @param pBackBuffer surface to render the sprite to
* @param pX x co-ordinate to render to (default is 0)
* @param pY y co-ordinate to render to (default is 0)
*/
void Sprite::Render (LPDIRECTDRAWSURFACE7 &pBackBuffer, float pX, float pY)
{
if (mSpriteWidth > 800) mSpriteWidth = 800;
RECT tFrom;
tFrom.left = tFrom.top = 0;
tFrom.right = mSpriteWidth;
tFrom.bottom = mSpriteHeight;
// bltfast parameters are (position x, position y, dd surface, draw rect, wait flag)
// pBackBuffer->BltFast (0 + DirectDraw::GetInstance()->ScreenWidth(), 0, mBitmapSurface, &tFrom, DDBLTFAST_WAIT);
pBackBuffer->BltFast (static_cast<DWORD>(pX + DirectDraw::GetInstance()->ScreenWidth()),
static_cast<DWORD>(pY), mBitmapSurface, &tFrom, DDBLTFAST_WAIT);
}
The surfaces were simply not in a compatible format.
Here's the fixed copy function (DDCopyBitmap), which I now call in the Load function:
extern "C" HRESULT
DDCopyBitmap(IDirectDrawSurface7 * pdds, HBITMAP hbm, int x, int y,
int dx, int dy)
{
HDC hdcImage;
HDC hdc;
BITMAP bm;
DDSURFACEDESC2 ddsd;
HRESULT hr;
if (hbm == NULL || pdds == NULL)
return E_FAIL;
//
// Make sure this surface is restored.
//
pdds->Restore();
//
// Select bitmap into a memoryDC so we can use it.
//
hdcImage = CreateCompatibleDC(NULL);
if (!hdcImage)
OutputDebugString("createcompatible dc failed\n");
SelectObject(hdcImage, hbm);
//
// Get size of the bitmap
//
GetObject(hbm, sizeof(bm), &bm);
dx = dx == 0 ? bm.bmWidth : dx; // Use the passed size, unless zero
dy = dy == 0 ? bm.bmHeight : dy;
//
// Get size of surface.
//
ddsd.dwSize = sizeof(ddsd);
ddsd.dwFlags = DDSD_HEIGHT | DDSD_WIDTH;
pdds->GetSurfaceDesc(&ddsd);
if ((hr = pdds->GetDC(&hdc)) == DD_OK)
{
StretchBlt(hdc, 0, 0, ddsd.dwWidth, ddsd.dwHeight, hdcImage, x, y,
dx, dy, SRCCOPY);
pdds->ReleaseDC(hdc);
}
DeleteDC(hdcImage);
return hr;
}
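With that helper, the call in Sprite::Load becomes the following (sketch; passing zero for the sizes falls back to the bitmap's own dimensions):
// Replaces the original CopyBitmap call in Sprite::Load.
DDCopyBitmap(tDDS, tHBM, 0, 0, 0, 0);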