There is a segmentation fault in vkCmdBlitImage. According to Valgrind, it is an invalid read of size 8 at address 0x48. Disabling layers does not fix the problem.
The driver used is the Nvidia Linux driver version 364.19. The GPU is a GeForce GTX 970.
Relevant code:
VkImageCreateInfo img_info;
img_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
img_info.pNext = NULL;
img_info.flags = 0;
img_info.imageType = VK_IMAGE_TYPE_2D;
img_info.format = VK_FORMAT_R8G8B8A8_UNORM;
img_info.extent = (VkExtent3D){info.width, info.height, 1};
img_info.mipLevels = 1;
img_info.arrayLayers = 1;
img_info.samples = VK_SAMPLE_COUNT_1_BIT;
img_info.tiling = VK_IMAGE_TILING_LINEAR;
img_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
img_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
img_info.queueFamilyIndexCount = 0;
img_info.pQueueFamilyIndices = NULL;
img_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
VkImage src_image;
VKR(vkCreateImage(info.device, &img_info, NULL, &src_image));
VkMemoryRequirements src_req;
vkGetImageMemoryRequirements(info.device, src_image, &src_req);
VkDeviceMemory src_mem = create_memory(info.physical_device, info.device,
src_req.memoryTypeBits, src_req.size,
true); //The true makes it create host-visible memory.
vkBindImageMemory(info.device, src_image, src_mem, 0);
VkImageSubresource src_subres;
src_subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
src_subres.mipLevel = 0;
src_subres.arrayLayer = 0;
VkSubresourceLayout src_subres_layout;
vkGetImageSubresourceLayout(info.device, src_image, &src_subres, &src_subres_layout);
uint8_t* src_data = NULL;
VKR(vkMapMemory(info.device, src_mem, src_subres_layout.offset, src_subres_layout.rowPitch*info.height, 0, (void**)&src_data));
//Code that initialized src_data
VkMappedMemoryRange range;
range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
range.pNext = NULL;
range.memory = src_mem;
range.offset = src_subres_layout.offset;
range.size = src_subres_layout.rowPitch * info.height;
VKR(vkFlushMappedMemoryRanges(info.device, 1, &range));
vkUnmapMemory(info.device, src_mem);
VkCommandBufferAllocateInfo alloc_info;
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.commandPool = info.cmd_pool;
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = 1;
VkCommandBuffer cmd_buf;
VKR(vkAllocateCommandBuffers(info.device, &alloc_info, &cmd_buf));
VkCommandBufferBeginInfo begin_cmd_buf_info;
begin_cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_cmd_buf_info.pNext = NULL;
begin_cmd_buf_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
begin_cmd_buf_info.pInheritanceInfo = NULL;
vkBeginCommandBuffer(cmd_buf, &begin_cmd_buf_info);
image_barrier(VK_IMAGE_ASPECT_COLOR_BIT, cmd_buf, VK_ACCESS_HOST_WRITE_BIT,
VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, src_image);
image_barrier(VK_IMAGE_ASPECT_COLOR_BIT, cmd_buf, info.dst_img_access,
VK_ACCESS_TRANSFER_WRITE_BIT, info.dst_img_layout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, info.dst_image);
VkImageBlit region;
region.srcSubresource = (VkImageSubresourceLayers){VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
region.srcOffsets[0] = (VkOffset3D){0, 0, 0};
region.srcOffsets[1] = (VkOffset3D){info.width, info.height, 1};
region.dstSubresource = (VkImageSubresourceLayers){VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
region.dstOffsets[0] = (VkOffset3D){0, 0, 0};
region.dstOffsets[1] = (VkOffset3D){info.width, info.height, 1};
vkCmdBlitImage(cmd_buf, src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, info.dst_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, VK_FILTER_NEAREST);
vkEndCommandBuffer(cmd_buf);
The rest of the code is found at https://gitlab.com/pendingchaos/WIP29/tree/00f348f2ef588e5f724fcb1f695e7692128cac4c/src.
Cut down output of vulkaninfo can be found at http://pastebin.com/JaHqCy98.
Your synchronization seems improper for the job. Your barriers discard the preinitialized image and do no synchronization (due to dstStageMask=BOTTOM_OF_PIPE).
Let me put together something that should work just fine with your computationally demanding 4x4 image processing:
image_barrier( VK_IMAGE_ASPECT_COLOR_BIT, cmd_buf,
VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
src_image);
image_barrier( VK_IMAGE_ASPECT_COLOR_BIT, cmd_buf,
info.dst_img_access, VK_ACCESS_TRANSFER_WRITE_BIT,
info.dst_img_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
info.dst_image);
BTW:
the amount parameter should be VkDeviceSize, not size_t, in create_memory()
vkBindImageMemory(), vkBeginCommandBuffer() and vkEndCommandBuffer() return a VkResult and should perhaps be wrapped in your VKR too
If you rewrite the whole aspect of the image, you can use oldLayout=VK_IMAGE_LAYOUT_UNDEFINED to discard the old data (more efficient!)
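Since image_barrier() is only shown at its call sites, here is a minimal sketch of what such a helper presumably looks like; the signature is inferred from the calls above, and the real implementation is in the linked repository:
void image_barrier(VkImageAspectFlags aspect, VkCommandBuffer cmd_buf,
                   VkAccessFlags src_access, VkAccessFlags dst_access,
                   VkImageLayout old_layout, VkImageLayout new_layout,
                   VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage,
                   VkImage image)
{
    VkImageMemoryBarrier barrier;
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.pNext = NULL;
    barrier.srcAccessMask = src_access;
    barrier.dstAccessMask = dst_access;
    barrier.oldLayout = old_layout;
    barrier.newLayout = new_layout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange.aspectMask = aspect;
    barrier.subresourceRange.baseMipLevel = 0;
    barrier.subresourceRange.levelCount = 1;
    barrier.subresourceRange.baseArrayLayer = 0;
    barrier.subresourceRange.layerCount = 1;
    //Single image barrier; no global memory or buffer barriers.
    vkCmdPipelineBarrier(cmd_buf, src_stage, dst_stage, 0,
                         0, NULL, 0, NULL, 1, &barrier);
}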
Related
I am trying to make a desktop recorder, but all I get is a black screen, and I have no clue why.
I tried with DX9 first: using the back buffer gives the same black result. The front-buffer method does work and captures frames correctly, but it is too slow (33 ms per frame, all of it because of GetFrontBuffer).
So I decided to try DX11. There are no errors returned, no errors when creating the swap chain and device; everything is fine, and the frames are in fact captured (I measure the time and FPS, and something is going on), but they are all black, as if the capture were not coming from the desktop but from somewhere else.
This is the capture method:
if(contains_errors()){return;}
m_swap_chain->GetBuffer(0, __uuidof(ID3D11Resource), (void**)&m_back_buffer_ptr);
return_if_null(m_back_buffer_ptr);
HRESULT hr = m_back_buffer_ptr->QueryInterface(__uuidof(ID3D11Resource), (void**)&m_back_buffer_data);
return_if_failed(hr);
hr = m_swap_chain->GetDevice(__uuidof(ID3D11Device), (void**)&m_device);
return_if_failed(hr);
hr = m_swap_chain->GetDesc(&m_desc);
return_if_failed(hr);
ID3D11Texture2D* texture = nullptr;
hr = m_device->CreateTexture2D(&m_tex_desc, 0, &texture);
return_if_failed(hr);
ID3D11DeviceContext* context = nullptr;
m_device->GetImmediateContext(&context);
return_if_null(context);
context->CopyResource(texture, m_back_buffer_data);
D3D11_MAPPED_SUBRESOURCE map_subres = {0, 0, 0};
hr = context->Map(texture, 0, D3D11_MAP_READ, 0, &map_subres);
return_if_failed(hr);
if(m_current_frame == 0)
{
m_current_frame = new BYTE[map_subres.DepthPitch];
}
memcpy(m_current_frame, map_subres.pData, map_subres.DepthPitch);
texture->Release();
m_device->Release();
This is the texture description setup:
ZeroMemory(&m_tex_desc, sizeof(m_tex_desc));
m_tex_desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
m_tex_desc.Width = m_desc.BufferDesc.Width;
m_tex_desc.Height = m_desc.BufferDesc.Height;
m_tex_desc.MipLevels = 1;
m_tex_desc.ArraySize = 1;
m_tex_desc.SampleDesc.Count = 1;
m_tex_desc.Usage = D3D11_USAGE_STAGING;
m_tex_desc.BindFlags = 0;
m_tex_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
m_tex_desc.MiscFlags = 0;
This is the swap chain description:
m_desc.BufferDesc.Width = 1366;
m_desc.BufferDesc.Height = 768;
m_desc.BufferDesc.RefreshRate.Numerator = 1;
m_desc.BufferDesc.RefreshRate.Denominator = 60;
m_desc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
m_desc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
m_desc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
m_desc.SampleDesc.Count = 2;
m_desc.SampleDesc.Quality = 0;
m_desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
m_desc.BufferCount = 1;
m_desc.OutputWindow = (HWND)m_dx_win->winId();
m_desc.Windowed = true;
m_desc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
m_desc.Flags = 0;
Class members:
private:
IDXGISwapChain* m_swap_chain = 0;
ID3D11DeviceContext* m_context = 0;
Dx_Output_Window* m_dx_win = 0;
IDXGIResource* m_back_buffer_ptr = 0;
ID3D11Resource* m_back_buffer_data = 0;
ID3D11Device* m_device = 0;
D3D_FEATURE_LEVEL m_selected_feature;
DXGI_SWAP_CHAIN_DESC m_desc;
D3D11_TEXTURE2D_DESC m_tex_desc = {};
I looked up basically all the resources I could, but I could not find any info on why it runs without errors yet the image is all black. I was thinking maybe there is something up with the display, but no: I took the raw data and displayed the values, and every pixel was exactly 0, which is black.
In the "m_desc.OutputWindow = (HWND)m_dx_win->winId();" line I also tried GetDesktopWindow(), but it doesn't change anything; in fact I got some warnings instead.
I have a DirectX rendering that works perfectly when I render to the screen:
var DxDevice = D3D.D3DDevice.CreateDeviceAndSwapChain(host.Handle);
...
var DxFinalRenderTexture = DxDevice.SwapChain.GetBuffer<D3D.Texture2D>(0);
var DxFinalRenderTarget = DxDevice.CreateRenderTargetView(DxFinalRenderTexture);
DxDevice.OM.RenderTargets = new D3D.OutputMergerRenderTargets(new D3D.RenderTargetView[] { DxFinalRenderTarget }, null);
DxTechnique.GetPassByIndex(0).Apply();
DxDevice.Draw(4, 0);
DxDevice.SwapChain.Present(0, GX.PresentOptions.None);
(input layout and vertex buffers are omitted here because, as I said, the rendering works as expected to the screen).
When I change it slightly to use a different render target rather than the screen:
var description = new D3D.Texture2DDescription() {
Width = 1024,
Height = 1024,
ArraySize = 1,
BindingOptions = D3D.BindingOptions.RenderTarget | D3D.BindingOptions.ShaderResource,
CpuAccessOptions = D3D.CpuAccessOptions.None,
Format = DX.Graphics.Format.B8G8R8A8UNorm,
MipLevels = 1,
MiscellaneousResourceOptions = D3D.MiscellaneousResourceOptions.None,
SampleDescription = new DX.Graphics.SampleDescription(1, 0),
Usage = D3D.Usage.Default
};
var DxFinalRenderTexture = DxDevice.CreateTexture2D(description);
var description2 = new D3D.RenderTargetViewDescription() {
Format = description.Format,
ViewDimension = D3D.RenderTargetViewDimension.Texture2D,
Texture2D = new D3D.Texture2DRenderTargetView() { MipSlice = 0 }
};
var DxFinalRenderTarget = DxDevice.CreateRenderTargetView(DxFinalRenderTexture, description2);
and finally save the texture by duly copying it to a staging resource and mapping the latter:
private WriteableBitmap GetTexture(D3D.Texture2D texture) {
var description = new D3D.Texture2DDescription() {
Width = 1024,
Height = 1024,
ArraySize = 1,
BindingOptions = D3D.BindingOptions.None,
CpuAccessOptions = D3D.CpuAccessOptions.Read,
Format = DX.Graphics.Format.B8G8R8A8UNorm,
MipLevels = 1,
MiscellaneousResourceOptions = D3D.MiscellaneousResourceOptions.None,
SampleDescription = new DX.Graphics.SampleDescription(1, 0),
Usage = D3D.Usage.Staging
};
var texture2 = DxDevice.CreateTexture2D(description);
DxDevice.CopyResource(texture2, texture);
var texmap = texture2.Map(0, D3D.Map.Read, D3D.MapOptions.None);
var bitmap = new WriteableBitmap(1024, 1024, 96, 96, PixelFormats.Pbgra32, null);
bitmap.Lock();
bitmap.WritePixels(new Int32Rect(0, 0, 1024, 1024), texmap.Data, 1024 * 1024 * 4, 1024 * 4);
bitmap.Unlock();
texture2.Unmap(0);
return bitmap;
}
the resulting bitmap will be completely empty. There are no errors either in compilation or during the run, just the emptiness.
It might also be worth noting that I don't need repeatedly rendered frames, just a single bitmap created, so there are no performance issues involved. I'd be happy to do away with the screen rendering completely and render to a final bitmap that I can use later.
I have a text and I want to train an SVM by adding features using the Java API. Looking at the examples, the main class used to build the training set is svm_problem. It appears that svm_node represents a feature (the index is the feature and the value is the weight of the feature).
What I have done is to keep a map (just to simplify the problem) that holds an association between each feature and an index. For each of my feature/weight pairs I create a new node:
svm_node currentNode = new svm_node();
int index = feature.getIndexInMap();
double value = feature.getWeight();
currentNode.index = index;
currentNode.value = value;
Is my intuition correct? What does svm_problem.y refer to? Does it refer to the index of the label? Is svm_problem.l just the length of the two vectors?
Your intuition is very close, but note that a single svm_node is one feature (an index/value pair); a pattern is an array of svm_node terminated by a node with index -1. The variable svm_problem.y is an array that contains the label of each pattern, and svm_problem.l is the size of the training set.
Also, beware that svm_parameter.nr_weight is the number of per-label penalty weights (useful if you have an unbalanced training set); if you are not going to use them, you must set that value to zero.
Let me show you a simple example in C++:
#include "svm.h"
#include <iostream>
using namespace std;
int main()
{
svm_parameter params;
params.svm_type = C_SVC;
params.kernel_type = RBF;
params.C = 1;
params.gamma = 1;
params.nr_weight = 0;
params.weight_label = NULL;
params.weight = NULL;
params.p = 0.0001;
/* svm_train() also reads these fields, so initialize them
   rather than leaving them indeterminate: */
params.degree = 3;
params.coef0 = 0;
params.nu = 0.5;
params.cache_size = 100; /* kernel cache size in MB */
params.eps = 1e-3;       /* stopping tolerance */
params.shrinking = 1;
params.probability = 0;
svm_problem problem;
problem.l = 4;
problem.y = new double[4]{1,-1,-1,1};
problem.x = new svm_node*[4];
{
problem.x[0] = new svm_node[3];
problem.x[0][0].index = 1;
problem.x[0][0].value = 0;
problem.x[0][1].index = 2;
problem.x[0][1].value = 0;
problem.x[0][2].index = -1;
}
{
problem.x[1] = new svm_node[3];
problem.x[1][0].index = 1;
problem.x[1][0].value = 1;
problem.x[1][1].index = 2;
problem.x[1][1].value = 0;
problem.x[1][2].index = -1;
}
{
problem.x[2] = new svm_node[3];
problem.x[2][0].index = 1;
problem.x[2][0].value = 0;
problem.x[2][1].index = 2;
problem.x[2][1].value = 1;
problem.x[2][2].index = -1;
}
{
problem.x[3] = new svm_node[3];
problem.x[3][0].index = 1;
problem.x[3][0].value = 1;
problem.x[3][1].index = 2;
problem.x[3][1].value = 1;
problem.x[3][2].index = -1;
}
for(int i=0; i<4; i++)
{
cout << problem.y[i] << endl;
}
svm_model * model = svm_train(&problem, &params);
svm_save_model("mymodel.svm", model);
for(int i=0; i<4; i++)
{
double d = svm_predict(model, problem.x[i]);
cout << "Prediction " << d << endl;
}
/* We should free the memory at this point.
But this example is large enough already */
}
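For completeness, the freeing that the final comment alludes to would look roughly like this (svm_free_and_destroy_model() is the libsvm call in recent versions; older releases used svm_destroy_model()):
svm_free_and_destroy_model(&model); /* frees the model's internal storage */
for(int i = 0; i < 4; i++)
    delete[] problem.x[i]; /* the per-pattern node arrays */
delete[] problem.x;
delete[] problem.y;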
I have a WDDM user-mode display driver for DX9. Now I would like to dump the render target's back buffer to a BMP file. Since the render target resource is not lockable, I have to create a resource from a system buffer, bitblt from the render target to the system buffer, and then save the system buffer to the BMP file. However, calling the bitblt always returns the error code E_FAIL. I also tried calling pfnCaptureToSysMem, which returned the same error code. Anything wrong here?
D3DDDI_SURFACEINFO nfo;
nfo.Depth = 0;
nfo.Width = GetRenderSize().cx;
nfo.Height = GetRenderSize().cy;
nfo.pSysMem = NULL;
nfo.SysMemPitch = 0;
nfo.SysMemSlicePitch = 0;
D3DDDIARG_CREATERESOURCE resource;
resource.Format = D3DDDIFMT_A8R8G8B8;
resource.Pool = D3DDDIPOOL_SYSTEMMEM;
resource.MultisampleType = D3DDDIMULTISAMPLE_NONE;
resource.MultisampleQuality = 0;
resource.pSurfList = &nfo;
resource.SurfCount = 1;
resource.MipLevels = 1;
resource.Fvf = 0;
resource.VidPnSourceId = 0;
resource.RefreshRate.Numerator = 0;
resource.RefreshRate.Denominator = 0;
resource.hResource = NULL;
resource.Flags.Value = 0;
resource.Flags.Texture = 1;
resource.Flags.Dynamic = 1;
resource.Rotation = D3DDDI_ROTATION_IDENTITY;
HRESULT hr = m_pDevice->m_deviceFuncs.pfnCreateResource(m_pDevice->GetDrv(), &resource);
HANDLE hSysSpace = resource.hResource;
D3DDDIARG_BLT blt;
blt.hSrcResource = m_pDevice->m_hRenderTarget;
blt.hDstResource = hSysSpace;
blt.SrcRect.left = 0;
blt.SrcRect.top = 0;
blt.SrcRect.right = GetRenderSize().cx;
blt.SrcRect.bottom = GetRenderSize().cy;
blt.DstRect = blt.SrcRect;
blt.DstSubResourceIndex = 0;
blt.SrcSubResourceIndex = 0;
blt.Flags.Value = 0;
blt.ColorKey = 0;
hr = m_pDevice->m_deviceFuncs.pfnBlt(m_pDevice, &blt);
You are on the right track, but I think you can use the DirectX functions for this.
In order to copy the render target from video memory to system memory you should use the IDirect3DDevice9::GetRenderTargetData() function.
This function requires that the destination surface is an offscreen plain surface created in pool D3DPOOL_SYSTEMMEM. This surface also must have the same dimensions as the render target (no stretching allowed). Use IDirect3DDevice9::CreateOffscreenPlainSurface() to create this surface.
Then this surface can be locked and the color data can be accessed by the CPU.
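A minimal sketch of that sequence, assuming pDevice is a valid IDirect3DDevice9* and with error handling omitted:
IDirect3DSurface9* rt = NULL;
IDirect3DSurface9* sysmem = NULL;
pDevice->GetRenderTarget(0, &rt);
D3DSURFACE_DESC desc;
rt->GetDesc(&desc);
// Same size and format as the render target; D3DPOOL_SYSTEMMEM makes it lockable.
pDevice->CreateOffscreenPlainSurface(desc.Width, desc.Height, desc.Format,
                                     D3DPOOL_SYSTEMMEM, &sysmem, NULL);
// Copy the GPU render target into the CPU-accessible surface (no stretching).
pDevice->GetRenderTargetData(rt, sysmem);
D3DLOCKED_RECT lr;
sysmem->LockRect(&lr, NULL, D3DLOCK_READONLY);
// lr.pBits points at the pixel data and lr.Pitch is the byte stride per row;
// write the BMP from here.
sysmem->UnlockRect();
sysmem->Release();
rt->Release();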
I want a chart with transparent background, and therefore PNG seems a good choice. But when I set transparent background, the quality of the axis labels falls dramatically. How do I fix this? See the following code. As it stands, the chart has a transparent background, as I want, but the text quality is atrocious. If I comment out the two "Color.Transparent" settings, then the text quality is nice, but the background is not transparent.
How do I get transparency and nice text?
public static void Main(string[] args)
{
Chart c = new Chart();
c.TextAntiAliasingQuality = TextAntiAliasingQuality.High;
Series s = new Series("Series1");
c.Series.Clear();
c.Series.Add(s);
s.ChartType = SeriesChartType.Line;
s.Color = Color.Black;
ChartArea chartArea = new ChartArea("ChartArea1");
c.ChartAreas.Clear();
c.ChartAreas.Add(chartArea);
chartArea.BackColor = Color.FromArgb(255, 255, 255);
chartArea.BackSecondaryColor = Color.FromArgb(220, 220, 220);
chartArea.BackGradientStyle = GradientStyle.TopBottom;
chartArea.AxisX.LineColor = Color.Gray;
chartArea.AxisX.LineWidth = 2;
chartArea.AxisX.LineDashStyle = ChartDashStyle.Solid;
chartArea.AxisY.LineColor = Color.Gray;
chartArea.AxisY.LineWidth = 2;
chartArea.AxisY.LineDashStyle = ChartDashStyle.Solid;
chartArea.AxisX.MajorGrid.LineColor = Color.LightGray;
chartArea.AxisX.MajorGrid.LineDashStyle = ChartDashStyle.Dash;
chartArea.AxisY.MajorGrid.LineColor = Color.LightGray;
chartArea.AxisY.MajorGrid.LineDashStyle = ChartDashStyle.Dash;
c.BackColor = Color.Transparent;
chartArea.BackColor = Color.Transparent;
double[] x = new double[] { 1999, 2005 };
double[] y = new double[] { 3210, 13456 };
Axis ay = chartArea.AxisY;
ay.Maximum = 13456;
ay.Minimum = 3210;
Axis ax = chartArea.AxisX;
ax.Maximum = 2005;
ax.Minimum = 1999;
for (int i = 0; i < x.Length; i++)
{
double xvalue = x[i];
double yvalue = y[i];
s.Points.AddXY(xvalue, yvalue);
}
// Save chart-image to disk:
c.SaveImage("chartimage.png", ChartImageFormat.Png);
}
Set the chart's AntiAliasing property to AntiAliasingStyles.Graphics to disable the antialiasing on text.
Taken from this thread.
Maybe this helps you:
In your .aspx file where your chart code is, look for the asp:ChartArea tag, then add BackColor="Transparent":
<asp:ChartArea Name="ChartArea1" BackColor="Transparent">
</asp:ChartArea>
Hope this helps.
chart.TextAntiAliasingQuality = TextAntiAliasingQuality.SystemDefault;
I read this here: http://forums.asp.net/p/1656335/4315304.aspx?Re%20Chart%20transparency%20and%20text%20quality