I have been writing my own library using DirectX and have hit an odd issue. Whilst trying to render an animating sprite, I simply see a big black square:
I have stepped through the code obsessively and concluded that it must be something in the loading of the actual sprites, because everything I can see in my code looks fine. Obviously, I cannot step into functions such as BltFast, so I cannot tell whether my sprite surfaces are being blitted onto the back buffer successfully.
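The one check that is possible from the outside is the HRESULT that BltFast returns; a minimal sketch of what I mean, using the same names as in the Render function below:

HRESULT hr = pBackBuffer->BltFast (static_cast<DWORD>(pX + DirectDraw::GetInstance()->ScreenWidth()),
                                   static_cast<DWORD>(pY), mBitmapSurface, &tFrom, DDBLTFAST_WAIT);
if (FAILED (hr))
    OutputDebugString ("BltFast failed\n"); // inspect hr to see why the blit failed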
Here are my load and render functions for the sprite:
SPRITE::LOAD
/**
 * loads a bitmap file and copies it to a DirectDraw surface
 *
 * @param pID       identifier to store for this sprite
 * @param pFileName name of the bitmap file to load into memory
 */
void Sprite::Load (const char *pID, const char *pFileName)
{
    // initialises the member variables with the new image id and file name
    mID = pID;
    mFileName = pFileName;
    // creates the necessary variables
    HBITMAP tHBM;
    BITMAP tBM;
    DDSURFACEDESC2 tDDSD;
    IDirectDrawSurface7 *tDDS;
    // stores the bitmap image in an HBITMAP handle
    tHBM = static_cast<HBITMAP> (LoadImage (NULL, pFileName, IMAGE_BITMAP, 0, 0, LR_LOADFROMFILE | LR_CREATEDIBSECTION));
    GetObject (tHBM, sizeof (tBM), &tBM);
    // create a surface for the HBITMAP to be copied onto
    ZeroMemory (&tDDSD, sizeof (tDDSD));
    tDDSD.dwSize = sizeof (tDDSD);
    tDDSD.dwFlags = DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH;
    tDDSD.ddsCaps.dwCaps = DDSCAPS_OFFSCREENPLAIN;
    tDDSD.dwWidth = tBM.bmWidth;
    tDDSD.dwHeight = tBM.bmHeight;
    DirectDraw::GetInstance ()->DirectDrawObject()->CreateSurface (&tDDSD, &tDDS, NULL);
    // copies the bitmap image onto the surface
    CopyBitmap(tDDS, tHBM, 0, 0, 0, 0);
    // deletes the bitmap image now that it has been used
    DeleteObject(tHBM);
    // stores the new width and height of the image
    mSpriteWidth = tBM.bmWidth;
    mSpriteHeight = tBM.bmHeight;
    // points the bitmap surface at this temporary surface holding the new bitmap image
    mBitmapSurface = tDDS;
}
SPRITE::RENDER
/**
 * renders the sprite's surface to the back buffer
 *
 * @param pBackBuffer surface to render the sprite to
 * @param pX x co-ordinate to render to (default is 0)
 * @param pY y co-ordinate to render to (default is 0)
 */
void Sprite::Render (LPDIRECTDRAWSURFACE7 &pBackBuffer, float pX, float pY)
{
    if (mSpriteWidth > 800) mSpriteWidth = 800;
    RECT tFrom;
    tFrom.left = tFrom.top = 0;
    tFrom.right = mSpriteWidth;
    tFrom.bottom = mSpriteHeight;
    // bltfast parameters are (position x, position y, dd surface, draw rect, wait flag)
    // pBackBuffer->BltFast (0 + DirectDraw::GetInstance()->ScreenWidth(), 0, mBitmapSurface, &tFrom, DDBLTFAST_WAIT);
    pBackBuffer->BltFast (static_cast<DWORD>(pX + DirectDraw::GetInstance()->ScreenWidth()),
                          static_cast<DWORD>(pY), mBitmapSurface, &tFrom, DDBLTFAST_WAIT);
}
It turned out the surfaces were simply not in a compatible format.
Here's the fixed DDCopyBitmap function, which I now call from the Load function in place of the old CopyBitmap call:
extern "C" HRESULT
DDCopyBitmap(IDirectDrawSurface7 * pdds, HBITMAP hbm, int x, int y,
int dx, int dy)
{
HDC hdcImage;
HDC hdc;
BITMAP bm;
DDSURFACEDESC2 ddsd;
HRESULT hr;
if (hbm == NULL || pdds == NULL)
return E_FAIL;
//
// Make sure this surface is restored.
//
pdds->Restore();
//
// Select bitmap into a memoryDC so we can use it.
//
hdcImage = CreateCompatibleDC(NULL);
if (!hdcImage)
OutputDebugString("createcompatible dc failed\n");
SelectObject(hdcImage, hbm);
//
// Get size of the bitmap
//
GetObject(hbm, sizeof(bm), &bm);
dx = dx == 0 ? bm.bmWidth : dx; // Use the passed size, unless zero
dy = dy == 0 ? bm.bmHeight : dy;
//
// Get size of surface.
//
ddsd.dwSize = sizeof(ddsd);
ddsd.dwFlags = DDSD_HEIGHT | DDSD_WIDTH;
pdds->GetSurfaceDesc(&ddsd);
if ((hr = pdds->GetDC(&hdc)) == DD_OK)
{
StretchBlt(hdc, 0, 0, ddsd.dwWidth, ddsd.dwHeight, hdcImage, x, y,
dx, dy, SRCCOPY);
pdds->ReleaseDC(hdc);
}
DeleteDC(hdcImage);
return hr;
}
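For reference, the corresponding line in Load becomes something like this (a sketch; checking the HRESULT is optional but useful):

// copies the bitmap image onto the surface using the format-aware helper
if (FAILED (DDCopyBitmap (tDDS, tHBM, 0, 0, 0, 0)))
    OutputDebugString ("DDCopyBitmap failed\n");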
I have RGB data as an rs2::frame. I convert it to a cv::Mat and send it over a TCP connection; on the server (receiver) side I store the buffer in a cv::Mat. My question is: how can I convert the cv::Mat back to an rs2::frame on the receiver side, so that I can use SDK functions that expect the rs2::frame type?
You need to simulate a software device in order to obtain an rs2::frame.
Following this example, you can write your own class that creates the synthetic streams, taking the data from the cv::Mat instances.
For example, here's something I have done to solve the problem.
rsImageConverter.h
#include <librealsense2/rs.hpp>
#include <librealsense2/hpp/rs_internal.hpp>
class rsImageConverter
{
public:
    rsImageConverter(int w, int h, int bpp);
    bool convertFrame(uint8_t* depth_data, uint8_t* color_data);
    rs2::frame getDepth() const;
    rs2::frame getColor() const;

private:
    int w = 640;
    int h = 480;
    int bpp = 2;
    rs2::software_device dev;
    rs2::software_sensor depth_sensor;
    rs2::software_sensor color_sensor;
    rs2::stream_profile depth_stream;
    rs2::stream_profile color_stream;
    rs2::syncer syncer;
    rs2::frame depth;
    rs2::frame color;
    int ind = 0;
};
rsImageConverter.cpp
#include "rsimageconverter.h"
rsImageConverter::rsImageConverter(int w, int h, int bpp) :
    w(w),
    h(h),
    bpp(bpp),
    depth_sensor(dev.add_sensor("Depth")), // initializing depth sensor
    color_sensor(dev.add_sensor("Color"))  // initializing color sensor
{
    rs2_intrinsics depth_intrinsics{ w, h, (float)(w / 2), (float)(h / 2), (float)w, (float)h, RS2_DISTORTION_BROWN_CONRADY, { 0,0,0,0,0 } };
    depth_stream = depth_sensor.add_video_stream({ RS2_STREAM_DEPTH, 0, 0,
                                                   w, h, 60, bpp,
                                                   RS2_FORMAT_Z16, depth_intrinsics });
    depth_sensor.add_read_only_option(RS2_OPTION_DEPTH_UNITS, 0.001f); // setting depth units option on the virtual sensor
    rs2_intrinsics color_intrinsics = { w, h,
                                        (float)w / 2, (float)h / 2,
                                        (float)w / 2, (float)h / 2,
                                        RS2_DISTORTION_BROWN_CONRADY, { 0,0,0,0,0 } };
    color_stream = color_sensor.add_video_stream({ RS2_STREAM_COLOR, 0, 1, w,
                                                   h, 60, bpp,
                                                   RS2_FORMAT_RGB8, color_intrinsics });
    dev.create_matcher(RS2_MATCHER_DLR_C); // create the matcher with the RGB frame
    depth_sensor.open(depth_stream);
    color_sensor.open(color_stream);
    depth_sensor.start(syncer);
    color_sensor.start(syncer);
    depth_stream.register_extrinsics_to(color_stream, { { 1,0,0,0,1,0,0,0,1 }, { 0,0,0 } });
}
bool rsImageConverter::convertFrame(uint8_t* depth_data, uint8_t* color_data)
{
    depth_sensor.on_video_frame({ depth_data, // Frame pixels
                                  [](void*) {}, // Custom deleter (if required)
                                  w*bpp, bpp, // Stride and Bytes-per-pixel
                                  (rs2_time_t)ind * 16, RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK, ind, // Timestamp, Frame# for potential sync services
                                  depth_stream });
    color_sensor.on_video_frame({ color_data, // Frame pixels from capture API
                                  [](void*) {}, // Custom deleter (if required)
                                  w*bpp, bpp, // Stride and Bytes-per-pixel
                                  (rs2_time_t)ind * 16, RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK, ind, // Timestamp, Frame# for potential sync services
                                  color_stream });
    ind++;
    rs2::frameset fset = syncer.wait_for_frames();
    depth = fset.first_or_default(RS2_STREAM_DEPTH);
    color = fset.first_or_default(RS2_STREAM_COLOR);
    return (depth && color); // return true if everything went well
}

rs2::frame rsImageConverter::getDepth() const
{
    return depth;
}

rs2::frame rsImageConverter::getColor() const
{
    return color;
}
And then you can use it like this (assuming depth and rgb are two cv::Mat instances, where depth has been converted to CV_16U and rgb is CV_8UC3 after a BGR-to-RGB conversion):
rsImageConverter* converter = new rsImageConverter(640, 480, 2);
...
if(converter->convertFrame(depth.data, rgb.data))
{
rs2::frame rs2depth = converter->getDepth();
rs2::frame rs2rgb = converter->getColor();
... // Here you use these frames
}
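For reference, a rough sketch of how the two cv::Mat inputs can be prepared before the call (depthRaw and bgr are placeholder names for whatever your receiving code produces; how the depth values need scaling depends on your data):

cv::Mat depth, rgb;
depthRaw.convertTo(depth, CV_16U);          // depth must end up as 16-bit unsigned (Z16)
cv::cvtColor(bgr, rgb, cv::COLOR_BGR2RGB);  // OpenCV stores BGR, the color stream expects RGB8
// depth.data and rgb.data are then passed to convertFrame() as shown above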
By the way, I designed this class to use both depth and RGB. To convert only one of them, you can simply pass an empty buffer as the other argument, or adapt the class.
The zbar engine sample source (zbarimg.c) shows the following:
https://github.com/ZBar/ZBar/blob/master/zbarimg/zbarimg.c
size_t bloblen = width * height;
unsigned char *blobdata = malloc(bloblen);
MagickExportImagePixels(images, 0, 0, width, height, "I", CharPixel, blobdata);
I'd like to see the blobdata.
How can I save the blobdata to a file?
I wrote a save_imgdata function to save the blobdata:
int save_imgdata(char* imgf, int width, int height, char *raw)
{
    PixelWand *p_wand = NULL;
    PixelIterator *iterator = NULL;
    PixelWand **pixels = NULL;
    unsigned long x, y;
    char hex[128];
    //MagickWandGenesis();
    p_wand = NewPixelWand();
    PixelSetColor(p_wand, "gray");
    //PixelSetColor(p_wand, "white");
    MagickWand *m_wand = NewMagickWand(); //CORE_RL_wand_.lib;
    MagickSetImageDepth(m_wand, 8);
    MagickNewImage(m_wand, width, height, p_wand);
    // Get a new pixel iterator
    iterator = NewPixelIterator(m_wand);
    for (y = 0; y < height; y++) {
        // Get the next row of the image as an array of PixelWands
        pixels = PixelGetNextIteratorRow(iterator, &x);
        // Set each pixel in the row from the raw data
        for (x = 0; x < width; x++) {
            sprintf(hex, "#%02x", *raw++);
            //sprintf(hex, "#%02%x02%x02x", *raw, *raw, *raw); raw++;
            PixelSetColor(pixels[x], hex);
        }
        // Sync writes the pixels back to the m_wand
        PixelSyncIterator(iterator);
    }
    MagickWriteImage(m_wand, imgf);
    DestroyMagickWand(m_wand);
    return 0;
}
Calling save_imgdata("imgw.bmp", width, height, blobdata) saves a 24bpp image.
What's wrong with save_imgdata? I want it to save an 8bpp grayscale image file.
Don't bother iterating and building dynamic color/pixel values -- it's slow and resource-intensive. If the data came from an export method, then use the import method to restore it.
int save_imgdata(char* imgf, int width, int height, void * raw)
{
    MagickWand * wand;
    PixelWand * bgcolor;
    bgcolor = NewPixelWand();
    PixelSetColor(bgcolor, "WHITE");
    wand = NewMagickWand();
    MagickNewImage(wand, width, height, bgcolor);
    bgcolor = DestroyPixelWand(bgcolor);
    MagickSetImageDepth(wand, 8);
    MagickSetImageColorspace(wand, GRAYColorspace);
    MagickImportImagePixels(wand, 0, 0, width, height, "I", CharPixel, raw);
    MagickQuantizeImage(wand,
                        256,            // Reduce to 8bpp
                        GRAYColorspace, // Match colorspace
                        0,              // Calculate optimal tree depth
                        MagickTrue,     // Use dither? This changes in IM-7
                        MagickFalse);   // Measure error
    MagickWriteImage(wand, imgf);
    wand = DestroyMagickWand(wand);
    return 0;
}
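For reference, calling it right after the export in zbarimg-style code looks like this (a sketch reusing the images, width, height and blobdata variables from the question; MagickWandGenesis/MagickWandTerminus are assumed to be handled elsewhere, as zbarimg already does):

MagickExportImagePixels(images, 0, 0, width, height, "I", CharPixel, blobdata);
save_imgdata("imgw.bmp", width, height, blobdata); /* now written as an 8bpp grayscale file */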
How can I flip the SharpDX DataBox without converting it to a bitmap?
I'm making a screen recording using SharpDX and Media Foundation. Below is the code showing how I get the DataBox.
mapSource = device.ImmediateContext.MapSubresource(screenTexture, 0,SharpDX.Direct3D11.MapMode.Read, SharpDX.Direct3D11.MapFlags.None);
But when I pass the mapSource to MediaFoundation.NET, the video it produces comes out vertically flipped.
IMFSample sample = null;
IMFMediaBuffer buffer = null;
IntPtr data = new IntPtr();
int bufferMaxLength;
int bufferCurrentLength;
int hr = (int)MFExtern.MFCreateMemoryBuffer(frameSizeBytes, out buffer);
if (Succeeded(hr)) hr = (int)buffer.Lock(out data, out bufferMaxLength, out bufferCurrentLength);
if (Succeeded(hr))
{
hr = (int)MFExtern.MFCopyImage(data, videoWidth * BYTES_PER_PIXEL, mapSource.DataPointer, videoWidth * BYTES_PER_PIXEL, videoWidth * BYTES_PER_PIXEL, videoHeight);
}
if (Succeeded(hr)) hr = (int)buffer.Unlock();
if (Succeeded(hr)) hr = (int)buffer.SetCurrentLength(frameSizeBytes);
if (Succeeded(hr)) hr = (int)MFExtern.MFCreateSample(out sample);
if (Succeeded(hr)) hr = (int)sample.AddBuffer(buffer);
if (Succeeded(hr)) hr = (int)sample.SetSampleTime(frame.prevRecordingDuration.Ticks);//(TICKS_PER_SECOND * frames / VIDEO_FPS);
if (Succeeded(hr)) hr = (int)sample.SetSampleDuration((frame.recordDuration-frame.prevRecordingDuration).Ticks);
if (Succeeded(hr)) hr = (int)sinkWriter.WriteSample(streamIndex, sample);
if (Succeeded(hr)) frames++;
COMBase.SafeRelease(sample);
COMBase.SafeRelease(buffer);
In your code there is a mistake in the call to MFCopyImage. According to the MFCopyImage documentation, the parameters
_In_ LONG lDestStride,
_In_ const BYTE *pSrc,
_In_ LONG lSrcStride,
lDestStride and lSrcStride are the width, in bytes, of the memory used to store one line of pixels. Your computation videoWidth * BYTES_PER_PIXEL is not correct, because for Windows RGB formats the stride can be wider than videoWidth * BYTES_PER_PIXEL. You must compute the destination stride with the function MFGetStrideForBitmapInfoHeader; the source stride you can get from your image source code. I do not know your code, but in my project I used:
D3D11_MAPPED_SUBRESOURCE resource;
UINT subresource = D3D11CalcSubresource(0, 0, 0);
ctx->Map(mDestImage, subresource, D3D11_MAP_READ_WRITE, 0, &resource);
LOG_INVOKE_MF_FUNCTION(MFCopyImage,
                       aPtrData,
                       mStride,
                       (BYTE*)resource.pData,
                       resource.RowPitch,
                       ...
Regards.
P.S. The destination stride mStride can be negative - that means the destination must be written from the last line to the first. This can be done with the following adjustment of the destination pointer: aPtrData += (mHeight - 1)*mStride;
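To make that concrete, here is a minimal C++ sketch of such a bottom-up write (destStride and pDest are names introduced here for illustration; mWidth, mHeight, aPtrData and resource are the names used above, and an RGB32 frame, i.e. 4 bytes per pixel, is assumed):

LONG destStride = 0;
MFGetStrideForBitmapInfoHeader(MFVideoFormat_RGB32.Data1, mWidth, &destStride);
if (destStride > 0)
    destStride = -destStride;                            // force bottom-up writing
BYTE* pDest = aPtrData + (mHeight - 1) * (-destStride);  // start of the last destination row
MFCopyImage(pDest, destStride,                           // destination walks upwards
            (BYTE*)resource.pData, resource.RowPitch,    // source is read top-down
            mWidth * 4,                                  // bytes used per row (RGB32 assumed)
            mHeight);                                    // number of rows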
I want to use OpenCV to process my desktop as if it were a video stream.
I am familiar with OpenCV.
I am not familiar with the Windows API.
I realize there are other ways to capture the screen, but for the purposes of my question, I need it to be done using OpenCV.
Here is my (super naive) code:
HWND hDesktopWnd;
HDC hDesktopDC;
hDesktopWnd=GetDesktopWindow();
hDesktopDC=GetDC(hDesktopWnd);
// get the height and width of the screen
int height = GetSystemMetrics(SM_CYVIRTUALSCREEN);
int width = GetSystemMetrics(SM_CXVIRTUALSCREEN);
// create a bitmap
HBITMAP hbDesktop = CreateCompatibleBitmap( hDesktopDC, width, height);
Mat src(height,width,CV_8UC4);
src.data = (uchar*)hbDesktop;
imshow("output",src); //fails :(
There are similar questions on StackOverflow, but they are either for the old-style OpenCV, or for Android operating system.
I'm on Windows 7 x64, OpenCV 2.4.3.
Thanks to anyone who can answer this question.
After MUCH trial and error, I managed to write a function to do it. Here it is for anyone else who might want it:
#include "stdafx.h"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <Windows.h>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;
Mat hwnd2mat(HWND hwnd){
    HDC hwindowDC, hwindowCompatibleDC;
    int height, width, srcheight, srcwidth;
    HBITMAP hbwindow;
    Mat src;
    BITMAPINFOHEADER bi;
    hwindowDC = GetDC(hwnd);
    hwindowCompatibleDC = CreateCompatibleDC(hwindowDC);
    SetStretchBltMode(hwindowCompatibleDC, COLORONCOLOR);
    RECT windowsize; // get the height and width of the screen
    GetClientRect(hwnd, &windowsize);
    srcheight = windowsize.bottom;
    srcwidth = windowsize.right;
    height = windowsize.bottom/2; //change this to whatever size you want to resize to
    width = windowsize.right/2;
    src.create(height, width, CV_8UC4);
    // create a bitmap
    hbwindow = CreateCompatibleBitmap(hwindowDC, width, height);
    bi.biSize = sizeof(BITMAPINFOHEADER); //http://msdn.microsoft.com/en-us/library/windows/window/dd183402%28v=vs.85%29.aspx
    bi.biWidth = width;
    bi.biHeight = -height; //this is the line that makes it draw upside down or not
    bi.biPlanes = 1;
    bi.biBitCount = 32;
    bi.biCompression = BI_RGB;
    bi.biSizeImage = 0;
    bi.biXPelsPerMeter = 0;
    bi.biYPelsPerMeter = 0;
    bi.biClrUsed = 0;
    bi.biClrImportant = 0;
    // use the previously created device context with the bitmap
    SelectObject(hwindowCompatibleDC, hbwindow);
    // copy from the window device context to the bitmap device context
    StretchBlt(hwindowCompatibleDC, 0, 0, width, height, hwindowDC, 0, 0, srcwidth, srcheight, SRCCOPY); //change SRCCOPY to NOTSRCCOPY for wacky colors !
    GetDIBits(hwindowCompatibleDC, hbwindow, 0, height, src.data, (BITMAPINFO *)&bi, DIB_RGB_COLORS); //copy the bits of hbwindow into the Mat's buffer
    // avoid memory leak
    DeleteObject(hbwindow); DeleteDC(hwindowCompatibleDC); ReleaseDC(hwnd, hwindowDC);
    return src;
}
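To treat the desktop as a video stream, the function can then be called in a loop, roughly like this (a sketch; ESC stops it):

HWND hwndDesktop = GetDesktopWindow();
while (true)
{
    Mat frame = hwnd2mat(hwndDesktop);
    imshow("output", frame);      // process/record the frame here instead, if you like
    if (waitKey(30) == 27) break; // ~30 ms between grabs, press ESC to quit
}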
A better way is to do it while allocating the memory for the pixels only once, so the only copy made here is the one done by BitBlt:
int main()
{
    int x_size = 800, y_size = 600; // <-- Your res for the image
    HBITMAP hBitmap; // <-- The image represented by hBitmap
    Mat matBitmap; // <-- The image represented by mat
    // Initialize DCs
    HDC hdcSys = GetDC(NULL); // Get DC of the target capture..
    HDC hdcMem = CreateCompatibleDC(hdcSys); // Create compatible DC
    void *ptrBitmapPixels; // <-- Pointer variable that will contain the pointer to the pixels
    // Create hBitmap with pointer to the pixels of the Bitmap
    BITMAPINFO bi; HDC hdc;
    ZeroMemory(&bi, sizeof(BITMAPINFO));
    bi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    bi.bmiHeader.biWidth = x_size;
    bi.bmiHeader.biHeight = -y_size; //negative so (0,0) is at top left
    bi.bmiHeader.biPlanes = 1;
    bi.bmiHeader.biBitCount = 32;
    hdc = GetDC(NULL);
    hBitmap = CreateDIBSection(hdc, &bi, DIB_RGB_COLORS, &ptrBitmapPixels, NULL, 0);
    // ^^ The output: hBitmap & ptrBitmapPixels
    // Set hBitmap in the hdcMem
    SelectObject(hdcMem, hBitmap);
    // Set matBitmap to point to the pixels of the hBitmap
    matBitmap = Mat(y_size, x_size, CV_8UC4, ptrBitmapPixels, 0);
    // ^^ note: first it is y, then it is x. very confusing
    // * SETUP DONE *
    // Now update the pixels using BitBlt
    BitBlt(hdcMem, 0, 0, x_size, y_size, hdcSys, 0, 0, SRCCOPY);
    // Just to do some image processing on the pixels.. (don't have to do this)
    Mat matRef = matBitmap(Range(100, 200), Range(100, 200));
    //                           y1   y2          x1   x2
    bitwise_not(matRef, matRef); // Invert the colors in this x1,x2,y1,y2
    // Display the results through Mat
    imshow("Title", matBitmap);
    // Wait until some key is pressed
    waitKey(0);
    return 0;
}
Note that no error handling is done here, to keep the example simple to understand, but you have to do error handling in your own code!
Hope this helps.
Can cudaMemcpy be used for memory allocated with cudaMallocPitch? If not, can you tell me which function should be used? cudaMallocPitch returns linear memory, so I suppose that cudaMemcpy should be used.
You certainly could use cudaMemcpy to copy pitched device memory, but it would be more usual to use cudaMemcpy2D. An example of a pitched copy from host to device would look something like this:
#include "cuda.h"
#include <assert.h>
typedef float real;
int main(void)
{
    cudaFree(0); // Establish context
    // Host array dimensions
    const size_t dx = 300, dy = 300;
    // For the CUDA API width and pitch are specified in bytes
    size_t width = dx * sizeof(real), height = dy;
    // Host array allocation
    real * host = new real[dx * dy];
    size_t pitch1 = dx * sizeof(real);
    // Device array allocation
    // pitch is determined by the API call
    real * device;
    size_t pitch2;
    assert( cudaMallocPitch((real **)&device, &pitch2, width, height) == cudaSuccess );
    // Sample memory copy - note source and destination pitches can be different
    assert( cudaMemcpy2D(device, pitch2, host, pitch1, width, height, cudaMemcpyHostToDevice) == cudaSuccess );
    // Destroy context
    assert( cudaDeviceReset() == cudaSuccess );
    return 0;
}
(note: untested, caveat emptor and all that.....)
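Copying back to the host works the same way with the argument roles swapped (again untested; same variable names as above):

// Device-to-host copy of the same pitched array: the destination pitch is the
// host pitch, the source pitch is the one returned by cudaMallocPitch
assert( cudaMemcpy2D(host, pitch1, device, pitch2, width, height, cudaMemcpyDeviceToHost) == cudaSuccess );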