I have a Direct2D setup that interops with Direct3D.
My Direct2D renders to a Direct3D texture just fine. I'm then trying to render that texture on top of my Direct3D scene. The texture itself shows up fine, but it covers the rest of the scene in black instead of blending.
// Create a D2D bitmap wrapping the shared DXGI surface so Direct2D renders
// into the same texture Direct3D will later sample as a shader resource.
// PREMULTIPLIED: D2D writes color channels already multiplied by alpha.
D2D1_BITMAP_PROPERTIES1 properties = D2D1::BitmapProperties1(
    D2D1_BITMAP_OPTIONS_TARGET | D2D1_BITMAP_OPTIONS_CANNOT_DRAW,
    D2D1::PixelFormat(
        DXGI_FORMAT_B8G8R8A8_UNORM,
        D2D1_ALPHA_MODE_PREMULTIPLIED
    )
);
pd2dDeviceCtx->CreateBitmapFromDxgiSurface(surface.Get(), &properties, &p2dBitmap);
pd2dDeviceCtx->SetTarget(p2dBitmap.Get());

// BlendState for compositing the D2D overlay onto the D3D scene.
// The overlay is PREMULTIPLIED alpha, so its color channels are already
// scaled by alpha. Blending with SRC_ALPHA multiplies by alpha a second
// time; the correct source factor for premultiplied content is ONE.
D3D11_BLEND_DESC blendStateDescription = {};
blendStateDescription.RenderTarget[0].BlendEnable = TRUE;
blendStateDescription.RenderTarget[0].SrcBlend = D3D11_BLEND_ONE;            // was D3D11_BLEND_SRC_ALPHA (wrong for premultiplied)
blendStateDescription.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
blendStateDescription.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
blendStateDescription.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
blendStateDescription.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ZERO;
blendStateDescription.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
blendStateDescription.RenderTarget[0].RenderTargetWriteMask = 0x0f;
pDevice->CreateBlendState(&blendStateDescription, &blendState);
pContext->OMSetBlendState(blendState.Get(), NULL, sampleMask);

// D3D draws....
pd3dContext->DrawIndexed(6, 0, 0);

// D2D draws
pd2dDeviceCtx->BeginDraw();
pd2dDeviceCtx->Clear(D2D1::ColorF(0, 0, 0, 0)); // fully transparent background
// .. more D2D draws
pd2dDeviceCtx->EndDraw();

// ... Draw texture that d2d used to direct3d
// NOTE(review): Direct2D shares the D3D11 device and alters pipeline state
// inside BeginDraw/EndDraw, so re-apply the blend state here. If blending is
// left disabled for this draw, the overlay's transparent texels are written
// as opaque black -- exactly the reported symptom.
pContext->OMSetBlendState(blendState.Get(), NULL, sampleMask);
pContext->VSSetShader(vertextShader.Get(), nullptr, 0);
pContext->PSSetShader(pixelShader.Get(), nullptr, 0);
pContext->PSSetShaderResources(0, 1, d2dTextShaderResource.GetAddressOf());
pContext->PSSetSamplers(0, 1, samplers.GetAddressOf());
pContext->DrawIndexed(6, 0, 0);
My texture renders correctly, but where it should be transparent it's all black, hiding what used to be there.
Related
// this is the function used to render a single frame
void render_frame(void)
{
init_graphics();
d3ddev->Clear(0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB(255, 0, 0), 1.0f, 0);
d3ddev->BeginScene();
// select which vertex format we are using
d3ddev->SetFVF(CUSTOMFVF);
// select the vertex buffer to display
d3ddev->SetStreamSource(0, v_buffer, 0, sizeof(CUSTOMVERTEX));
// copy the vertex buffer to the back buffer
d3ddev->DrawPrimitive(D3DPT_TRIANGLESTRIP, 0, 2);
// d3ddev->DrawPrimitive(D3DPT_TRIANGLESTRIP, 0, 1);
d3ddev->EndScene();
d3ddev->Present(NULL, NULL, NULL, NULL);
}
// this is the function that puts the 3D models into video RAM
void init_graphics(void)
{
    // create the vertices using the CUSTOMVERTEX struct.
    // Strip order TL, TR, BR, BL yields triangles (TL,TR,BR) and (TR,BR,BL),
    // which together cover the quad; winding of the second triangle is
    // flipped automatically by the strip rules.
    CUSTOMVERTEX vertices[] =
    {
        { 100.f,  0.f, 0.f, D3DCOLOR_XRGB(0, 0, 255), },  // top-left
        { 300.f,  0.f, 0.f, D3DCOLOR_XRGB(0, 0, 255), },  // top-right
        { 300.f, 80.f, 0.f, D3DCOLOR_XRGB(0, 0, 255), },  // bottom-right
        { 100.f, 80.f, 0.f, D3DCOLOR_XRGB(0, 0, 255), },  // bottom-left
    };

    // create a vertex buffer interface called v_buffer.
    // Size the buffer from the actual vertex array: the original requested
    // 6 * sizeof(CUSTOMVERTEX) for only 4 vertices.
    d3ddev->CreateVertexBuffer(sizeof(vertices),
        0,
        CUSTOMFVF,
        D3DPOOL_MANAGED,
        &v_buffer,
        NULL);

    VOID* pVoid; // receives a pointer to the locked buffer memory

    // lock v_buffer and copy the vertices into it
    v_buffer->Lock(0, 0, (void**)&pVoid, 0);
    memcpy(pVoid, vertices, sizeof(vertices));
    v_buffer->Unlock();
}
Can't render a square for some reason. I've searched for an hour but can't find the answer.
https://i.imgur.com/KCKZSrJ.jpg
Does anybody know how to render it? I'm using DirectX 9.
I tried using DrawIndexedPrimitive, but it has the same result.
There's likely a few things going on here:
You do not set a Vertex or Pixel Shader, so you are using the legacy fixed-function render pipeline. This pipeline requires you to set the view/projection matrices with SetTransform. Since you haven't done that, the vertex positions you provide in 'screen space' don't mean what you think they mean. See The Direct3D Transformation Pipeline.
You are not setting the backface culling mode via SetRenderState, so it defaults to D3DCULL_CCW (i.e. cull counter-clockwise winding triangles). As such, your vertex positions result in one of the triangles being rejected. While getting started, you may want to call SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE);.
You are using TRIANGLESTRIP and only 4 points. You may find it easier to get correct initially by using TRIANGLELIST and 6 points.
I try to perform MSAA on a framebuffer, And in the standalone version where i draw a cube to the framebuffer and blit that framebuffer to the canvas it works like a charm:
// Request a WebGL2 context with the default (canvas) framebuffer NOT
// antialiased -- MSAA is performed manually via a multisampled renderbuffer.
var gl = canvas.getContext("webgl2", {
antialias: false
});
// Offscreen framebuffer whose color attachment is a multisampled
// renderbuffer, using the implementation's maximum sample count.
const frambuffer = gl.createFramebuffer();
const renderbuffer = gl.createRenderbuffer();
gl.bindRenderbuffer(gl.RENDERBUFFER, renderbuffer);
gl.renderbufferStorageMultisample(gl.RENDERBUFFER, gl.getParameter(gl.MAX_SAMPLES), gl.RGBA8, this.width, this.height);
gl.bindFramebuffer(gl.FRAMEBUFFER, frambuffer);
gl.framebufferRenderbuffer(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.RENDERBUFFER, renderbuffer);
.. Prepare scene
// Render the 3D geometry into the multisampled framebuffer.
gl.bindFramebuffer(gl.FRAMEBUFFER, frambuffer);
.. Draw scene
// Resolve: blitFramebuffer from a multisampled READ framebuffer into the
// single-sampled default DRAW framebuffer performs the MSAA resolve.
gl.bindFramebuffer(gl.READ_FRAMEBUFFER, frambuffer);
gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, null);
gl.clearBufferfv(gl.COLOR, 0, [1.0, 1.0, 1.0, 1.0]);
gl.blitFramebuffer( 0, 0, canvas.width, canvas.height,
0, 0, canvas.width, canvas.height,
gl.COLOR_BUFFER_BIT, gl.LINEAR);
But when I do this in my engine with a deferred pipeline, the blit is performed but the multisampling (MSAA) is not. The difference I can think of is that there I am writing an image drawn to a quad to the framebuffer, whereas in the working example it is a cube.
As requested, in the case where it is not working the setup is like this:
// Same context setup as the working standalone example.
var gl = canvas.getContext("webgl2", {
antialias: false
});
.. Load resources ..
.. Prepare renderpasses ..
shadow_depth for every light
deferred scene
ssao
shadow for first light
convolution on ssao and shadow
convolution
uber for every light
tonemap
msaa
..
.. draw renderpasses ..
deferred scene
ssao
shadow for first light
convolution on ssao and shadow
convolution
uber for every light
tonemap
...
// Multisampled framebuffer created as in the standalone case. NOTE(review):
// here only the final tonemapped full-screen quad is rendered into it, not
// the 3D geometry, so there are no interior triangle edges left for MSAA to
// smooth -- see the answer below.
const frambuffer = gl.createFramebuffer();
const renderbuffer = gl.createRenderbuffer();
gl.bindRenderbuffer(gl.RENDERBUFFER, renderbuffer);
gl.renderbufferStorageMultisample(gl.RENDERBUFFER, gl.getParameter(gl.MAX_SAMPLES), gl.RGBA8, this.width, this.height);
gl.bindFramebuffer(gl.FRAMEBUFFER, frambuffer);
gl.framebufferRenderbuffer(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.RENDERBUFFER, renderbuffer);
gl.bindFramebuffer(gl.FRAMEBUFFER, frambuffer);
..draw tonemap of scene to quad
// Resolve the (already fully shaded) quad into the default framebuffer.
gl.bindFramebuffer(gl.READ_FRAMEBUFFER, frambuffer);
gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, null);
gl.clearBufferfv(gl.COLOR, 0, [1.0, 1.0, 1.0, 1.0]);
gl.blitFramebuffer( 0, 0, canvas.width, canvas.height,
0, 0, canvas.width, canvas.height,
gl.COLOR_BUFFER_BIT, gl.LINEAR);
renderbufferStorageMultisample needs to be applied to the framebuffer object that holds the initial 3D content. When doing post-processing, multisampling has no effect because only 1 or 2 triangles are rasterized, and they span the entire viewport (MSAA only smooths triangle edges).
Predefined: My A4 sheet will always be of white color.
I need to detect A4 sheet from image. I am able to detect rectangles, now the problem is I am getting multiple rectangles from my image. So I extracted the images from the detected rectangle points.
Now I want to match image color with white color.
Using below method to extract image from contours detected :
// Extracts the (perspective-corrected) paper region described by the four
// corner points in `square` from `image`, scaled to A4 proportions
// (210 x 297 mm times an integer scale factor), always returned in
// portrait orientation. Side effect: draws a green bounding rectangle on
// the input `image`.
- (cv::Mat) getPaperAreaFromImage: (std::vector<cv::Point>) square, cv::Mat image
{
// declare used vars
int paperWidth = 210; // in mm, because scale factor is taken into account
int paperHeight = 297; // in mm, because scale factor is taken into account
cv::Point2f imageVertices[4];
float distanceP1P2;
float distanceP1P3;
BOOL isLandscape = true;
int scaleFactor;
cv::Mat paperImage;
cv::Mat paperImageCorrected;
cv::Point2f paperVertices[4];
// sort square corners for further operations
square = sortSquarePointsClockwise( square );
// rearrange to get proper order for getPerspectiveTransform()
// (clockwise order -> row-major TL, TR, BL, BR order)
imageVertices[0] = square[0];
imageVertices[1] = square[1];
imageVertices[2] = square[3];
imageVertices[3] = square[2];
// get distance between corner points for further operations
distanceP1P2 = distanceBetweenPoints( imageVertices[0], imageVertices[1] );
distanceP1P3 = distanceBetweenPoints( imageVertices[0], imageVertices[2] );
// calc paper, paperVertices; take orientation into account
// (longer top edge than side edge => the sheet lies in landscape)
if ( distanceP1P2 > distanceP1P3 ) {
// NOTE(review): ceil() of lroundf() is redundant -- lroundf already
// returns an integral value.
scaleFactor = ceil( lroundf(distanceP1P2/paperHeight) ); // we always want to scale the image down to maintain the best quality possible
// NOTE(review): cv::Mat takes (rows, cols); warpPerspective below uses
// paperImage.size() so the output is still consistent with paperVertices.
paperImage = cv::Mat( paperWidth*scaleFactor, paperHeight*scaleFactor, CV_8UC3 );
paperVertices[0] = cv::Point( 0, 0 );
paperVertices[1] = cv::Point( paperHeight*scaleFactor, 0 );
paperVertices[2] = cv::Point( 0, paperWidth*scaleFactor );
paperVertices[3] = cv::Point( paperHeight*scaleFactor, paperWidth*scaleFactor );
}
else {
isLandscape = false;
scaleFactor = ceil( lroundf(distanceP1P3/paperHeight) ); // we always want to scale the image down to maintain the best quality possible
paperImage = cv::Mat( paperHeight*scaleFactor, paperWidth*scaleFactor, CV_8UC3 );
paperVertices[0] = cv::Point( 0, 0 );
paperVertices[1] = cv::Point( paperWidth*scaleFactor, 0 );
paperVertices[2] = cv::Point( 0, paperHeight*scaleFactor );
paperVertices[3] = cv::Point( paperWidth*scaleFactor, paperHeight*scaleFactor );
}
// un-warp the perspective so the sheet becomes an axis-aligned rectangle
cv::Mat warpMatrix = getPerspectiveTransform( imageVertices, paperVertices );
cv::warpPerspective(image, paperImage, warpMatrix, paperImage.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT );
// Debug visualization: outline the detected sheet on the ORIGINAL image.
// NOTE(review): `object` is never used; the UIImage conversion appears to be
// leftover debug code -- consider removing once done debugging.
if (true) {
cv::Rect rect = boundingRect(cv::Mat(square));
cv::rectangle(image, rect.tl(), rect.br(), cv::Scalar(0,255,0), 5, 8, 0);
UIImage *object = [self UIImageFromCVMat:paperImage];
}
// we want portrait output
if ( isLandscape ) {
// rotate 90 degrees: transpose + horizontal flip
cv::transpose(paperImage, paperImageCorrected);
cv::flip(paperImageCorrected, paperImageCorrected, 1);
return paperImageCorrected;
}
return paperImage;
}
EDITED: I used the method below to get the color from the image. But now my problem is that after converting my original image to cv::Mat and cropping it, there is already a transparent grey overlay on my image, so I always get the same color.
Is there any direct way to get the original color from a cv::Mat image?
// Returns the average color of `image`. The image is drawn into a 1x1
// premultiplied-alpha bitmap context; Core Graphics averages all pixels
// during the downscale, leaving a single RGBA sample in `rgba`.
- (UIColor *)averageColor: (UIImage *) image {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char rgba[4];
    CGContextRef context = CGBitmapContextCreate(rgba, 1, 1, 8, 4, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGContextDrawImage(context, CGRectMake(0, 0, 1, 1), image.CGImage);
    CGColorSpaceRelease(colorSpace);
    CGContextRelease(context);

    if(rgba[3] > 0) {
        // The bitmap is premultiplied (stored = straight * alpha), so the
        // straight component is rgba[i] / rgba[3]. The original computed
        // multiplier = alpha/255.0, which multiplies by alpha AGAIN instead
        // of dividing, skewing every result toward black.
        CGFloat alpha = ((CGFloat)rgba[3])/255.0;
        CGFloat multiplier = 1.0/((CGFloat)rgba[3]);
        return [UIColor colorWithRed:((CGFloat)rgba[0])*multiplier
                               green:((CGFloat)rgba[1])*multiplier
                                blue:((CGFloat)rgba[2])*multiplier
                               alpha:alpha];
    }
    else {
        // Fully transparent pixel: un-premultiplying is undefined (division
        // by zero), so return the raw channel values.
        return [UIColor colorWithRed:((CGFloat)rgba[0])/255.0
                               green:((CGFloat)rgba[1])/255.0
                                blue:((CGFloat)rgba[2])/255.0
                               alpha:((CGFloat)rgba[3])/255.0];
    }
}
EDIT 2 :
Input Image
Getting this output
Need to detect only A4 sheet of white color.
I just resolved it using Google Vision api.
My objective was to measure cracks for building purposes from an image. In my case the user places an A4 sheet as a reference on the image where the crack is; I detect the A4 sheet and calculate the real-world size represented by each pixel. The user then taps on two points along the crack, and I calculate the distance between them.
In google vision I used document text detection api and printed my app name on A4 sheet fully covered vertically or horizontally. And google vision api detect that text and gives me the coordinate.
I'm capturing a picture from a webcam with openCV. Then the frame should be transformed into an openGL texture and be shown on the screen. I got the following code but the window remains black. I'm very new to openGL and have no more ideas why it doesn't work.
int main ()
{
int w = 800,h=600;
glfwInit();
//configure glfw
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
GLFWwindow* window = glfwCreateWindow(w, h, "OpenGL", NULL, nullptr); // windowed
glfwMakeContextCurrent(window);
glewExperimental = GL_TRUE;
glewInit();
initializeCapturing();
//init GL
glViewport(0, 0, w, h); // use a screen size of WIDTH x HEIGHT
glEnable(GL_TEXTURE_2D); // Enable 2D texturing
glMatrixMode(GL_PROJECTION); // Make a simple 2D projection on the entire window
glLoadIdentity();
glOrtho(0.0, w, h, 0.0, 0.0, 100.0);
glMatrixMode(GL_MODELVIEW); // Set the matrix mode to object modeling
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClearDepth(0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear the window
cv::Mat frame;
captureFromWebcam(frame,capture0);
/* OpenGL texture binding of the image loaded by DevIL */
GLuint texid;
glGenTextures(1, &texid); /* Texture name generation */
glBindTexture(GL_TEXTURE_2D, texid); /* Binding of texture name */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); /* We will use linear interpolation for magnification filter */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); /* We will use linear interpolation for minifying filter */
glTexImage2D(GL_TEXTURE_2D, 0,3, frame.size().width, frame.size().height, 0, GL_RGB, GL_UNSIGNED_BYTE, 0); /* Texture specification */
while(!glfwWindowShouldClose(window))
{
glfwPollEvents();
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
// Clear color and depth buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindTexture(GL_TEXTURE_2D,texid);
glMatrixMode(GL_MODELVIEW); // Operate on model-view matrix
/* Draw a quad */
glBegin(GL_QUADS);
glTexCoord2i(0, 0); glVertex2i(0, 0);
glTexCoord2i(0, 1); glVertex2i(0, h);
glTexCoord2i(1, 1); glVertex2i(w, h);
glTexCoord2i(1, 0); glVertex2i(w, 0);
glEnd();
glFlush();
glfwSwapBuffers(window);
}
releaseCapturing();
glfwTerminate();
return 1;
}
and the other procedures
cv::VideoCapture capture0;
cv::VideoCapture capture1;

// Grabs, decodes and stores the next frame from `capture` into `frame`.
void captureFromWebcam(cv::Mat &frame, cv::VideoCapture &capture)
{
    // operator>> is VideoCapture's stream-style alias for read().
    capture >> frame;
}
// Opens both webcam devices (indices 0 and 1). Returns true when both opened
// successfully; otherwise logs, releases the captures and returns false.
bool initializeCapturing()
{
    capture0.open(0);
    capture1.open(1);

    // Logical || instead of the original bitwise |: same truth table on
    // bools, but short-circuits and states the intent clearly.
    if(!capture0.isOpened() || !capture1.isOpened())
    {
        std::cout << "Ein oder mehrere VideoCaptures konnten nicht geƶffnet werden" << std::endl;
        // Release BOTH captures. The original released only the ones that
        // FAILED to open (a no-op) and leaked whichever one had succeeded.
        // release() is safe on a capture that never opened.
        capture0.release();
        capture1.release();
        return false;
    }
    return true;
}
// Releases both webcam devices; safe to call even if they were never opened.
void releaseCapturing()
{
capture0.release();
capture1.release();
}
It looks like you mixed several code fragments you found in different places. You are requesting an OpenGL 3.2 core profile, but the drawing code you are using is immediate-mode, fixed-function pipeline code, which is not available in a core profile. You basically requested a modern GL context, but the drawing code is completely outdated and no longer supported by the GL version you selected.
As a quick fix, you could simply remove the following lines:
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
Doing so should provide you with either a legacy GL context or a compatibility profile of some modern context, depending on the operating system and GL implementation you use. You can expect to get at least OpenGL 2.1 that way (except on very old hardware not supporting GL2 at all, but even that would be OK for your code). The rest of the code should work in such a context.
I still suggest that you learn modern GL instead of the old, deprecated legacy stuff you are using here.
You did not pass the pointer to the frame's pixel data (note that cv::Mat::data is a member, not a method):
glTexImage2D(GL_TEXTURE_2D, 0,3, frame.size().width, frame.size().height, 0, GL_RGB, GL_UNSIGNED_BYTE, (void*)frame.data); /* Texture specification */
And with this code, you are only going to get the first frame.
Put "captureFromWebcam" and the texture upload inside the while loop.
(Sorry for my english)
I have a problem with directx 11 rendering - if i try to render more then one model, i see just models with odd index. All model that are rendered with even index are not visible.
my code based on rastertek tutorials:
// Render three instances of the same model at different world positions,
// all sharing one view/projection pair.
m_dx->BeginScene(0.0f, 0.0f, 0.0f, 1.0f);
{
m_camera->Render();
XMMATRIX view;
m_camera->GetViewMatrix(view);
XMMATRIX world;
m_dx->GetWorldMatrix(world);
XMMATRIX projection;
m_dx->GetProjectionMatrix(projection);
XMMATRIX ortho;
m_dx->GetOrthoMatrix(ortho);
// NOTE(review): `view` and `projection` are passed to the shader's Render
// for every draw. If that method takes them by reference and transposes
// them in place (see the shader render code below), they flip between
// transposed and un-transposed on alternate calls -- which matches the
// reported symptom of only every other model being visible.
world = XMMatrixTranslation(-2, 0, -4);
m_model->Render(m_dx->GetDeviceContext());
m_texture_shader->Render(m_dx->GetDeviceContext(), m_model->GetIndicesCount(), world, view, projection,
m_model->GetTexture());
world = XMMatrixTranslation(2, 0, -2);
m_model->Render(m_dx->GetDeviceContext());
m_texture_shader->Render(m_dx->GetDeviceContext(), m_model->GetIndicesCount(), world, view, projection,
m_model->GetTexture());
world = XMMatrixTranslation(0, 0, -3);
m_model->Render(m_dx->GetDeviceContext());
m_texture_shader->Render(m_dx->GetDeviceContext(), m_model->GetIndicesCount(), world, view, projection,
m_model->GetTexture());
}
m_dx->EndScene();
Model render method
// Bind this model's geometry to the input assembler: one interleaved
// vertex buffer, 32-bit indices, triangle-list topology.
UINT stride, offset;
stride = sizeof(VertexPosTextureNormal);
offset = 0;
device_context->IASetVertexBuffers(0, 1, &m_vertex_buffer, &stride, &offset);
device_context->IASetIndexBuffer(m_index_buffer, DXGI_FORMAT_R32_UINT, 0);
device_context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
Shader render method:
world = XMMatrixTranspose(world);
view = XMMatrixTranspose(view);
projection = XMMatrixTranspose(projection);
D3D11_MAPPED_SUBRESOURCE mapped_subres;
RETURN_FALSE_IF_FAILED(context->Map(m_matrix_buffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped_subres));
MatrixBuffer* data = (MatrixBuffer*)mapped_subres.pData;
data->world = world;
data->view = view;
data->projection = projection;
context->Unmap(m_matrix_buffer, 0);
context->VSSetConstantBuffers(0, 1, &m_matrix_buffer);
context->PSSetShaderResources(0, 1, &texture);
// render
context->IASetInputLayout(m_layout);
context->VSSetShader(m_vertex_shader, NULL, 0);
context->PSSetShader(m_pixel_shader, NULL, 0);
context->PSSetSamplers(0, 1, &m_sampler_state);
context->DrawIndexed(indices, 0, 0);
What can be the reason of this?
thank you.
This code -
world = XMMatrixTranspose(world);
view = XMMatrixTranspose(view);
projection = XMMatrixTranspose(projection);
is transposing the same matrices each time you call it, so they only have the correct value on alternate calls. The world matrix is reset each time in the calling code, but the view and projection matrices are wrong on alternate calls.