CreateWindow coordinates are limited to 32767?

How can I create a child window with coordinates larger than 32767? For example:
HWND tmp = CreateWindow(
    _T( "BUTTON" ), _T( "Test" ),
    WS_CHILD | WS_VISIBLE,
    10, 45000, 80, 25,
    hWnd, (HMENU)1, (HINSTANCE)GetModuleHandle( NULL ), NULL );
This code creates the button at coordinates 10;32767. Using MoveWindow or SetWindowPos gives the same result.
I need to create a window with scrolling and child controls on it (a simple form).

A workaround: ScrollWindow repositions existing child windows without the 16-bit clamp that CreateWindow applies, so you can scroll the parent by (-x, -y), create the child at the origin, and then scroll back. The new child ends up at (x, y), even beyond 32767:
HWND CreateWindowExPatched( DWORD exStyle, LPCTSTR className, LPCTSTR title, DWORD style, int x, int y, int width, int height, HWND parent, HMENU menu, HINSTANCE instance, LPVOID param )
{
    HWND hWnd = NULL;
    // shift the parent's content so that the target (x, y) maps to the origin
    ScrollWindow( parent, -x, -y, NULL, NULL );
    // create the child at (0, 0), which is safely within the 16-bit range
    hWnd = CreateWindowEx( exStyle, className, title, style, 0, 0, width, height, parent, menu, instance, param );
    // scroll back: this moves the new child to (x, y) without clamping
    ScrollWindow( parent, x, y, NULL, NULL );
    return hWnd;
}
HWND CreateWindowPatched( LPCTSTR className, LPCTSTR title, DWORD style, int x, int y, int width, int height, HWND parent, HMENU menu, HINSTANCE instance, LPVOID param )
{
    return CreateWindowExPatched( 0, className, title, style, x, y, width, height, parent, menu, instance, param );
}
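With these wrappers, the example from the question becomes (a sketch reusing the question's arguments):
HWND tmp = CreateWindowPatched(
    _T( "BUTTON" ), _T( "Test" ),
    WS_CHILD | WS_VISIBLE,
    10, 45000, 80, 25, // y = 45000 is now applied without being clamped
    hWnd, (HMENU)1, (HINSTANCE)GetModuleHandle( NULL ), NULL );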

Which operating system are you using to run your code?
In my project, Windows 7 seems to truncate coordinates to the 16-bit signed integer range (-32768 to 32767), while Windows XP doesn't seem to do that and allows positioning across the whole 32-bit signed integer range, from -2147483648 to 2147483647.

Related

Convert OpenCV Mat to Frame type of Realsense in Librealsense SDK

I have RGB data as an rs2::frame. I converted it to a cv::Mat and sent it over a TCP connection; on the server (receiver) side I store the buffer in a cv::Mat. My question is: how can I convert the cv::Mat back to an rs2::frame on the receiver side, so that I can use the SDK functions that expect the rs2::frame type?
You need to simulate a software device in order to obtain an rs2::frame.
Following this example you can write your own class that creates the synthetic streams, taking the data from the cv::Mat instances.
For example, here's something I have done to solve the problem.
rsImageConverter.h
#include <librealsense2/rs.hpp>
#include <librealsense2/hpp/rs_internal.hpp>
class rsImageConverter
{
public:
    rsImageConverter(int w, int h, int bpp);
    bool convertFrame(uint8_t* depth_data, uint8_t* color_data);
    rs2::frame getDepth() const;
    rs2::frame getColor() const;
private:
    int w = 640;
    int h = 480;
    int bpp = 2;
    rs2::software_device dev;
    rs2::software_sensor depth_sensor;
    rs2::software_sensor color_sensor;
    rs2::stream_profile depth_stream;
    rs2::stream_profile color_stream;
    rs2::syncer syncer;
    rs2::frame depth;
    rs2::frame color;
    int ind = 0;
};
rsImageConverter.cpp
#include "rsImageConverter.h"
rsImageConverter::rsImageConverter(int w, int h, int bpp) :
    w(w),
    h(h),
    bpp(bpp),
    depth_sensor(dev.add_sensor("Depth")), // initializing depth sensor
    color_sensor(dev.add_sensor("Color"))  // initializing color sensor
{
    rs2_intrinsics depth_intrinsics{ w, h, (float)(w / 2), (float)(h / 2), (float)w, (float)h, RS2_DISTORTION_BROWN_CONRADY, { 0,0,0,0,0 } };
    depth_stream = depth_sensor.add_video_stream({ RS2_STREAM_DEPTH, 0, 0,
                                                   w, h, 60, bpp,
                                                   RS2_FORMAT_Z16, depth_intrinsics });
    depth_sensor.add_read_only_option(RS2_OPTION_DEPTH_UNITS, 0.001f); // setting the depth-units option on the virtual sensor
    rs2_intrinsics color_intrinsics = { w, h,
                                        (float)w / 2, (float)h / 2,
                                        (float)w / 2, (float)h / 2,
                                        RS2_DISTORTION_BROWN_CONRADY, { 0,0,0,0,0 } };
    color_stream = color_sensor.add_video_stream({ RS2_STREAM_COLOR, 0, 1, w,
                                                   h, 60, bpp,
                                                   RS2_FORMAT_RGB8, color_intrinsics });
    dev.create_matcher(RS2_MATCHER_DLR_C); // create the matcher with the RGB frame
    depth_sensor.open(depth_stream);
    color_sensor.open(color_stream);
    depth_sensor.start(syncer);
    color_sensor.start(syncer);
    depth_stream.register_extrinsics_to(color_stream, { { 1,0,0,0,1,0,0,0,1 }, { 0,0,0 } });
}
bool rsImageConverter::convertFrame(uint8_t* depth_data, uint8_t* color_data)
{
    depth_sensor.on_video_frame({ depth_data,   // frame pixels
                                  [](void*) {}, // custom deleter (if required)
                                  w * bpp, bpp, // stride and bytes-per-pixel
                                  (rs2_time_t)ind * 16, RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK, ind, // timestamp and frame number for potential sync services
                                  depth_stream });
    color_sensor.on_video_frame({ color_data,   // frame pixels from the capture API
                                  [](void*) {}, // custom deleter (if required)
                                  w * bpp, bpp, // stride and bytes-per-pixel
                                  (rs2_time_t)ind * 16, RS2_TIMESTAMP_DOMAIN_HARDWARE_CLOCK, ind, // timestamp and frame number for potential sync services
                                  color_stream });
    ind++;
    rs2::frameset fset = syncer.wait_for_frames();
    depth = fset.first_or_default(RS2_STREAM_DEPTH);
    color = fset.first_or_default(RS2_STREAM_COLOR);
    return (depth && color); // return true if both frames arrived
}
rs2::frame rsImageConverter::getDepth() const
{
    return depth;
}

rs2::frame rsImageConverter::getColor() const
{
    return color;
}
You can then use it like this (assuming depth and rgb are two cv::Mat instances, where depth has been converted to CV_16U and rgb is CV_8UC3 converted from BGR to RGB):
rsImageConverter* converter = new rsImageConverter(640, 480, 2);
...
if (converter->convertFrame(depth.data, rgb.data))
{
    rs2::frame rs2depth = converter->getDepth();
    rs2::frame rs2rgb = converter->getColor();
    ... // here you use these frames
}
By the way, I designed this class to handle both depth and RGB. To convert only one of them you can simply pass an empty frame for the other argument, or change the class.
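For completeness, the OpenCV-side preparation mentioned above might look like this (a sketch; it assumes the received images are a CV_16U cv::Mat named depth16 and a BGR CV_8UC3 cv::Mat named bgr, both 640x480):
cv::Mat rgb;
cv::cvtColor(bgr, rgb, cv::COLOR_BGR2RGB); // librealsense expects RGB order, OpenCV stores BGR
rsImageConverter converter(640, 480, 2);
if (converter.convertFrame(depth16.data, rgb.data))
{
    rs2::frame rs2depth = converter.getDepth();
    rs2::frame rs2rgb = converter.getColor();
}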

SharpDX MapSubresource is vertically flipped (C#)

How can I flip the SharpDX.DataBox without converting it to a bitmap?
I'm making a screen recording using SharpDX and Media Foundation. Below is the code showing how I get the DataBox.
mapSource = device.ImmediateContext.MapSubresource(screenTexture, 0, SharpDX.Direct3D11.MapMode.Read, SharpDX.Direct3D11.MapFlags.None);
But when I pass mapSource to MediaFoundation.NET, the video it produces is vertically flipped.
IMFSample sample = null;
IMFMediaBuffer buffer = null;
IntPtr data = new IntPtr();
int bufferMaxLength;
int bufferCurrentLength;
int hr = (int)MFExtern.MFCreateMemoryBuffer(frameSizeBytes, out buffer);
if (Succeeded(hr)) hr = (int)buffer.Lock(out data, out bufferMaxLength, out bufferCurrentLength);
if (Succeeded(hr))
{
    hr = (int)MFExtern.MFCopyImage(data, videoWidth * BYTES_PER_PIXEL, mapSource.DataPointer, videoWidth * BYTES_PER_PIXEL, videoWidth * BYTES_PER_PIXEL, videoHeight);
}
if (Succeeded(hr)) hr = (int)buffer.Unlock();
if (Succeeded(hr)) hr = (int)buffer.SetCurrentLength(frameSizeBytes);
if (Succeeded(hr)) hr = (int)MFExtern.MFCreateSample(out sample);
if (Succeeded(hr)) hr = (int)sample.AddBuffer(buffer);
if (Succeeded(hr)) hr = (int)sample.SetSampleTime(frame.prevRecordingDuration.Ticks);//(TICKS_PER_SECOND * frames / VIDEO_FPS);
if (Succeeded(hr)) hr = (int)sample.SetSampleDuration((frame.recordDuration-frame.prevRecordingDuration).Ticks);
if (Succeeded(hr)) hr = (int)sinkWriter.WriteSample(streamIndex, sample);
if (Succeeded(hr)) frames++;
COMBase.SafeRelease(sample);
COMBase.SafeRelease(buffer);
In your code there is a mistake in the call to MFCopyImage. According to the MFCopyImage documentation, the parameters
_In_ LONG lDestStride,
_In_ const BYTE *pSrc,
_In_ LONG lSrcStride,
take strides: lDestStride and lSrcStride are the width, in bytes, of the memory that stores one line of pixels. Your computation videoWidth * BYTES_PER_PIXEL is not correct, because for Windows RGB formats the stride can be wider than videoWidth * BYTES_PER_PIXEL. You must compute the destination stride with the MFGetStrideForBitmapInfoHeader function; the source stride you can get from your image source. I do not know your code, but in my project I used:
D3D11_MAPPED_SUBRESOURCE resource;
UINT subresource = D3D11CalcSubresource(0, 0, 0);
ctx->Map(mDestImage, subresource, D3D11_MAP_READ_WRITE, 0, &resource);
LOG_INVOKE_MF_FUNCTION(MFCopyImage,
                       aPtrData,               // destination buffer
                       mStride,                // destination stride
                       (BYTE*)resource.pData,  // source buffer
                       resource.RowPitch,      // source stride
                       // the tail of this call was garbled in the original post;
                       // MFCopyImage's remaining parameters are the width of one
                       // line in bytes and the number of lines, e.g.:
                       widthInBytes,
                       mHeight);
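The destination stride itself can be obtained like this (a sketch, assuming a 32-bpp RGB frame, for which the format code is D3DFMT_X8R8G8B8):
LONG stride = 0;
// MFGetStrideForBitmapInfoHeader takes the format code and the frame width
// and returns the surface stride; it can be negative for bottom-up formats
HRESULT hr = MFGetStrideForBitmapInfoHeader(D3DFMT_X8R8G8B8, videoWidth, &stride);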
Regards.
P.S. The destination stride mStride can be negative; that means the image must be written from the last line to the first. This can be done by adjusting the destination pointer: aPtrData += (mHeight - 1)*mStride;
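To make the P.S. concrete, here is a minimal sketch of flipping vertically during the copy (plain C++; dstBase, srcBase and srcStride are hypothetical names, the rest comes from the question): point the destination at its last line and pass a negative destination stride, so MFCopyImage writes the lines bottom-up.
LONG absStride = videoWidth * BYTES_PER_PIXEL;               // assumes a tightly packed destination
BYTE* dstLastLine = dstBase + (videoHeight - 1) * absStride; // start of the last destination line
HRESULT hr = MFCopyImage(dstLastLine,  // write the first source line into the last destination line
                         -absStride,   // negative stride: walk upwards through the destination
                         srcBase,      // source pixels, e.g. mapSource.DataPointer
                         srcStride,    // source stride, e.g. the RowPitch of the mapped texture
                         absStride,    // width of one line in bytes
                         videoHeight); // number of lines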

Direct3D11: some parts of the model are always in front of the others, probably a depth issue

I'm new to D3D programming. When I tried to render a model, I ran into a strange problem.
As you can see in the picture, some parts of the model are always drawn in front of the others.
Each model vertex contains only the following data:
{
    float x, y, z;
    float r, g, b;
    float u, v;
}
I tried rendering it in OpenGL and WebGL (http://nalol.azurewebsites.net/) and it works well, but in D3D11 I get this strange problem.
I searched Google and found something about depth, but I don't know how to deal with it.
The following are the relevant parts of my code:
HLSL file
struct vout
{
    float4 position : SV_POSITION;
    float3 normal : NORMAL;
    float2 texcoord : TEXCOORD;
};

vout vshader(float3 position : POSITION, float3 normals : NORMAL, float2 texcoords : TEXCOORD)
{
    vout output;
    output.position = float4(position, 1);
    output.normal = normals;
    output.texcoord = texcoords;
    return output;
}

Texture2D shaderTexture;
SamplerState SampleType;

float3 pshader(float3 position : POSITION, float3 normals : NORMAL, float2 texcoords : TEXCOORD) : SV_TARGET
{
    return shaderTexture.Sample(SampleType, texcoords);
}
vertex struct
struct lol_skn_vertex {
    float position[3];
    char bone_index[4];    // for bones and animation, not used here
    float bone_weights[4]; // for bones and animation, not used here
    float normals[3];
    float texcoords[2];
};
input layout object
D3D11_INPUT_ELEMENT_DESC ied[] =
{
    {"POSITION", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
    {"NORMAL", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 32, D3D11_INPUT_PER_VERTEX_DATA, 0},
    {"TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 44, D3D11_INPUT_PER_VERTEX_DATA, 0},
};
render function
void RenderFrame(void)
{
    FLOAT ColorRGBA[4] = {0.0f, 0.2f, 0.4f, 1.0f};
    d3d11_device_context->ClearRenderTargetView(d3d11_view_rt_backbuffer, ColorRGBA);
    d3d11_device_context->ClearDepthStencilView(d3d11_view_ds, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.f, 0);
    update();
    UINT stride = sizeof(lol_skn_vertex);
    UINT offset = 0;
    d3d11_device_context->IASetVertexBuffers(0, 1, &vertex_buffer, &stride, &offset);
    d3d11_device_context->IASetIndexBuffer(index_buffer, DXGI_FORMAT_R16_UINT, 0);
    d3d11_device_context->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
    d3d11_device_context->DrawIndexed(skn.num_indices, 0, 0);
    // switch the back buffer and the front buffer
    dxgi_swapchain->Present(0, 0);
}
buffer update function
void update() {
    // copy the vertices into the buffer
    D3D11_MAPPED_SUBRESOURCE ms;
    d3d11_device_context->Map(vertex_buffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
    memcpy(ms.pData, skn_vertex_buffer, sizeof(lol_skn_vertex) * skn.num_vertices); // copy the data
    SYSTEMTIME SystemTime;
    GetSystemTime(&SystemTime);
    float angle = (float)SystemTime.wMilliseconds / 1000 + SystemTime.wSecond;
    D3DXMATRIX x;
    D3DXMatrixRotationY(&x, angle);
    D3DXVec4TransformArray((D3DXVECTOR4 *)ms.pData, sizeof(lol_skn_vertex), (D3DXVECTOR4 *)ms.pData, sizeof(lol_skn_vertex), &x, skn.num_vertices); // use D3DXVECTOR4 for the transform
    // unmap the buffer
    d3d11_device_context->Unmap(vertex_buffer, NULL);
}
In the end I solved the problem myself; I had made two very stupid mistakes.
First: in the input layout, I used DXGI_FORMAT_R32G32_FLOAT for POSITION, which only contains x and y, so the shader always got 0 for z.
Second: my model data is not normalized (it ranges from -50 to 50), so I had used D3D11_RASTERIZER_DESC to disable depth clipping, and then forgot about it.
Fixing these two problems made everything work.
Great thanks to Gnietschow :)
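For reference, the corrected input layout might look like this (a sketch based on the answer; note that the NORMAL element presumably also needs three components to match float normals[3], although the answer only calls out POSITION):
D3D11_INPUT_ELEMENT_DESC ied[] =
{
    // POSITION now reads all three floats (x, y, z) at offset 0
    {"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
    // NORMAL is also three floats, stored at offset 32 after the bone data
    {"NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 32, D3D11_INPUT_PER_VERTEX_DATA, 0},
    {"TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 44, D3D11_INPUT_PER_VERTEX_DATA, 0},
};
Re-enabling depth clipping is then a matter of recreating the rasterizer state (assuming a d3d11_device alongside the context shown in the question):
D3D11_RASTERIZER_DESC rd = {};
rd.FillMode = D3D11_FILL_SOLID;
rd.CullMode = D3D11_CULL_BACK;
rd.DepthClipEnable = TRUE; // re-enable near/far plane clipping
ID3D11RasterizerState* rs = NULL;
d3d11_device->CreateRasterizerState(&rd, &rs);
d3d11_device_context->RSSetState(rs);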

C to Delphi method conversion of OpenCV sample code

I have a code snippet from an OpenCV example, as follows:
CvScalar sum_line_pixels( IplImage* image, CvPoint pt1, CvPoint pt2 )
{
    CvLineIterator iterator;
    int blue_sum = 0, green_sum = 0, red_sum = 0;
    int count = cvInitLineIterator( image, pt1, pt2, &iterator, 8, 0 );
    for( int i = 0; i < count; i++ ){
        blue_sum += iterator.ptr[0];
        green_sum += iterator.ptr[1];
        red_sum += iterator.ptr[2];
        CV_NEXT_LINE_POINT(iterator);
        /* print the pixel coordinates: demonstrates how to calculate the
           coordinates */
        {
            int offset, x, y;
            /* assume that ROI is not set, otherwise need to take it
               into account. */
            offset = iterator.ptr - (uchar*)(image->imageData);
            y = offset/image->widthStep;
            x = (offset - y*image->widthStep)/(3*sizeof(uchar) /* size of pixel */);
            printf("(%d,%d)\n", x, y );
        }
    }
    return cvScalar( blue_sum, green_sum, red_sum );
}
I got stuck on the line:
offset = iterator.ptr - (uchar*)(image->imageData);
The iterator structure is:
PCvLineIterator = ^TCvLineIterator;
TCvLineIterator = packed record
    ptr: ^UCHAR;
    err: Integer;
    plus_delta: Integer;
    minus_delta: Integer;
    plus_step: Integer;
    minus_step: Integer;
end;
and image->imageData is declared as:
imageData: PByte;
Could someone help me convert the offset line to Delphi?
Thanks!
The line that calculates offset is simply computing the number of bytes between the pointers iterator.ptr and image->imageData. Assuming you are using the same variable names, a Delphi version of that code would look like this:
offset := PByte(iterator.ptr) - image.ImageData;
However, since you are using an older version of Delphi, the above code will not compile. Older Delphi versions (pre Delphi 2009) don't permit pointer arithmetic on types other than PAnsiChar. So you will need to write it like this:
offset := PAnsiChar(iterator.ptr) - PAnsiChar(image.ImageData);
I suspect that what is confusing you in the C code is (uchar*). That is the C syntax for a type cast.
As an aside, it is a mistake to use packed records for OpenCV structs. If you take a look at the C header files you will see that these structs are not packed. This is benign in the case of CvLineIterator since it has no padding, but you will get caught out somewhere down the line if you get into the bad habit of packing structs that should not be packed.

DirectX Sprite Rendering Issue

I have been writing my own library using DirectX and have hit an odd issue: while trying to render an animated sprite, I simply see a big black square.
I have stepped through the code obsessively and have concluded that it must be something about the loading of the actual sprites, because everything I can see in my code is fine. Obviously, I cannot step into functions such as BltFast, so I cannot tell whether my sprite surfaces are being blitted onto the back buffer successfully.
Here are my load and render functions for the sprite:
SPRITE::LOAD
/**
 * loads a bitmap file and copies it to a DirectDraw surface
 *
 * @param pID       identifier to store for the sprite
 * @param pFileName name of the bitmap file to load into memory
 */
void Sprite::Load (const char *pID, const char *pFileName)
{
    // initialise the member variables with the new image id and file name
    mID = pID;
    mFileName = pFileName;
    // create the necessary variables
    HBITMAP tHBM;
    BITMAP tBM;
    DDSURFACEDESC2 tDDSD;
    IDirectDrawSurface7 *tDDS;
    // load the bitmap image into an HBITMAP handle
    tHBM = static_cast<HBITMAP> (LoadImage (NULL, pFileName, IMAGE_BITMAP, 0, 0, LR_LOADFROMFILE | LR_CREATEDIBSECTION));
    GetObject (tHBM, sizeof (tBM), &tBM);
    // create a surface for the HBITMAP to be copied onto
    ZeroMemory (&tDDSD, sizeof (tDDSD));
    tDDSD.dwSize = sizeof (tDDSD);
    tDDSD.dwFlags = DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH;
    tDDSD.ddsCaps.dwCaps = DDSCAPS_OFFSCREENPLAIN;
    tDDSD.dwWidth = tBM.bmWidth;
    tDDSD.dwHeight = tBM.bmHeight;
    DirectDraw::GetInstance ()->DirectDrawObject()->CreateSurface (&tDDSD, &tDDS, NULL);
    // copy the bitmap image onto the surface
    CopyBitmap(tDDS, tHBM, 0, 0, 0, 0);
    // delete the bitmap image now that it has been used
    DeleteObject(tHBM);
    // store the new width and height of the image
    mSpriteWidth = tBM.bmWidth;
    mSpriteHeight = tBM.bmHeight;
    // point the member surface at this temporary surface holding the new bitmap image
    mBitmapSurface = tDDS;
}
SPRITE::RENDER
/**
 * renders the sprite's surface to the back buffer
 *
 * @param pBackBuffer surface to render the sprite to
 * @param pX x co-ordinate to render to (default is 0)
 * @param pY y co-ordinate to render to (default is 0)
 */
void Sprite::Render (LPDIRECTDRAWSURFACE7 &pBackBuffer, float pX, float pY)
{
    if (mSpriteWidth > 800) mSpriteWidth = 800;
    RECT tFrom;
    tFrom.left = tFrom.top = 0;
    tFrom.right = mSpriteWidth;
    tFrom.bottom = mSpriteHeight;
    // BltFast parameters are (position x, position y, dd surface, source rect, wait flag)
    // pBackBuffer->BltFast (0 + DirectDraw::GetInstance()->ScreenWidth(), 0, mBitmapSurface, &tFrom, DDBLTFAST_WAIT);
    pBackBuffer->BltFast (static_cast<DWORD>(pX + DirectDraw::GetInstance()->ScreenWidth()),
                          static_cast<DWORD>(pY), mBitmapSurface, &tFrom, DDBLTFAST_WAIT);
}
It turned out the surfaces were simply not in a compatible format.
Here's the fixed copy function, which I now call from the load function:
extern "C" HRESULT
DDCopyBitmap(IDirectDrawSurface7 * pdds, HBITMAP hbm, int x, int y,
             int dx, int dy)
{
    HDC hdcImage;
    HDC hdc;
    BITMAP bm;
    DDSURFACEDESC2 ddsd;
    HRESULT hr;
    if (hbm == NULL || pdds == NULL)
        return E_FAIL;
    //
    // Make sure this surface is restored.
    //
    pdds->Restore();
    //
    // Select the bitmap into a memory DC so we can use it.
    //
    hdcImage = CreateCompatibleDC(NULL);
    if (!hdcImage)
        OutputDebugString("CreateCompatibleDC failed\n");
    SelectObject(hdcImage, hbm);
    //
    // Get the size of the bitmap.
    //
    GetObject(hbm, sizeof(bm), &bm);
    dx = dx == 0 ? bm.bmWidth : dx; // use the passed size, unless zero
    dy = dy == 0 ? bm.bmHeight : dy;
    //
    // Get the size of the surface.
    //
    ddsd.dwSize = sizeof(ddsd);
    ddsd.dwFlags = DDSD_HEIGHT | DDSD_WIDTH;
    pdds->GetSurfaceDesc(&ddsd);
    if ((hr = pdds->GetDC(&hdc)) == DD_OK)
    {
        // StretchBlt through GDI converts the pixels to the surface's format
        StretchBlt(hdc, 0, 0, ddsd.dwWidth, ddsd.dwHeight, hdcImage, x, y,
                   dx, dy, SRCCOPY);
        pdds->ReleaseDC(hdc);
    }
    DeleteDC(hdcImage);
    return hr;
}
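In Sprite::Load, the original CopyBitmap call is then replaced with a call to this function (a sketch):
// copy the bitmap onto the surface, letting GDI convert the pixels
// to the surface's own format
HRESULT hr = DDCopyBitmap(tDDS, tHBM, 0, 0, 0, 0);
if (FAILED(hr))
    OutputDebugString("DDCopyBitmap failed\n");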
