Draw/Remove geometrical shapes on render target using DirectX 11

I am trying to draw/remove geometrical shapes on a render target which displays an image.
During each frame render, a new image is rendered to the render target by updating the resource via texture mapping, which works perfectly as expected.
Now I'm trying to draw a new geometrical shape filled with a solid color on top of the render target, but only during the rendering of every 10th frame.
However, I'm currently stuck on how I should approach this.
I'm using DirectX 11 on a Windows 7 PC with C# (SlimDX or SharpDX for DirectX).
Any suggestion would be great.
Thanks.
Code:
During the rendering loop I added the code below to draw the overlay, which is a triangle in my case.
var device = this.Device;
var context = device.ImmediateContext;
var effectsFileResource = Properties.Resources.ShapeEffect;
ShaderBytecode shaderByteCode = ShaderBytecode.Compile(effectsFileResource, "fx_5_0", ShaderFlags.EnableStrictness | ShaderFlags.Debug, EffectFlags.None);
var effect = new Effect(device, shaderByteCode);
// create triangle vertex data, making sure to rewind the stream afterward
var verticesTriangle = new DataStream(VertexPositionColor.SizeInBytes * 3, true, true);
verticesTriangle.Write(new VertexPositionColor(new Vector3(0.0f, 0.5f, 0.5f), new Color4(1.0f, 0.0f, 0.0f, 1.0f)));
verticesTriangle.Write(new VertexPositionColor(new Vector3(0.5f, -0.5f, 0.5f), new Color4(0.0f, 1.0f, 0.0f, 1.0f)));
verticesTriangle.Write(new VertexPositionColor(new Vector3(-0.5f, -0.5f, 0.5f), new Color4(0.0f, 0.0f, 1.0f, 1.0f)));
verticesTriangle.Position = 0;
// create the triangle vertex layout and buffer
var layoutColor = new InputLayout(device, effect.GetTechniqueByName("Color").GetPassByIndex(0).Description.Signature, VertexPositionColor.inputElements);
var vertexBufferColor = new SharpDX.Direct3D11.Buffer(device, verticesTriangle, (int)verticesTriangle.Length, ResourceUsage.Default, BindFlags.VertexBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
verticesTriangle.Close();
var srv = new ShaderResourceView(device, this.RenderTarget);
effect.GetVariableByName("g_Overlay").AsShaderResource().SetResource(srv);
// Think of the shared textureD3D10 as an overlay.
// The overlay needs to show the 2d content but let the underlying triangle (or whatever)
// show thru, which is accomplished by blending.
var bsd = new BlendStateDescription();
bsd.RenderTarget[0].IsBlendEnabled = true;
bsd.RenderTarget[0].SourceBlend = BlendOption.SourceColor;
bsd.RenderTarget[0].DestinationBlend = BlendOption.BlendFactor;
bsd.RenderTarget[0].BlendOperation = BlendOperation.Add;
bsd.RenderTarget[0].SourceAlphaBlend = BlendOption.One;
bsd.RenderTarget[0].DestinationAlphaBlend = BlendOption.Zero;
bsd.RenderTarget[0].AlphaBlendOperation = BlendOperation.Add;
bsd.RenderTarget[0].RenderTargetWriteMask = ColorWriteMaskFlags.All;
var blendStateTransparent = new BlendState(device, bsd);
context.InputAssembler.InputLayout = layoutColor;
context.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleStrip;
context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(vertexBufferColor, VertexPositionColor.SizeInBytes, 0));
context.OutputMerger.BlendState = blendStateTransparent;
var currentTechnique = effect.GetTechniqueByName("Color");
for (var pass = 0; pass < currentTechnique.Description.PassCount; ++pass)
{
    using (var effectPass = currentTechnique.GetPassByIndex(pass))
    {
        System.Diagnostics.Debug.Assert(effectPass.IsValid, "Invalid EffectPass");
        effectPass.Apply(context);
    }
    context.Draw(3, 0);
}
srv.Dispose();
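One thing worth noting about the snippet above: it recompiles the effect and recreates the vertex buffer, blend state and SRV on every pass through the render loop. A minimal sketch of how the loop could instead gate the overlay to every 10th frame, assuming those resources are created once at initialization (frameCount and the DrawOverlayTriangle helper, which would wrap the input-assembler setup and the Draw(3, 0) call above, are hypothetical names):
frameCount++;
if (frameCount % 10 == 0)
{
    // blendStateTransparent, layoutColor and vertexBufferColor were created once at startup
    context.OutputMerger.BlendState = blendStateTransparent;
    DrawOverlayTriangle(context); // binds layoutColor/vertexBufferColor, applies the pass, calls Draw(3, 0)
    context.OutputMerger.BlendState = null; // restore default blending for the next frame's image update
}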
Also below is the shader file for the effect:
Texture2D g_Overlay;

SamplerState g_samLinear
{
    Filter = MIN_MAG_MIP_LINEAR;
    AddressU = CLAMP;
    AddressV = CLAMP;
};

// ------------------------------------------------------
// A shader that accepts Position and Color
// ------------------------------------------------------
struct ColorVS_IN
{
    float4 pos : POSITION;
    float4 col : COLOR;
};

struct ColorPS_IN
{
    float4 pos : SV_POSITION;
    float4 col : COLOR;
};

ColorPS_IN ColorVS(ColorVS_IN input)
{
    ColorPS_IN output = (ColorPS_IN)0;
    output.pos = input.pos;
    output.col = input.col;
    return output;
}

float4 ColorPS(ColorPS_IN input) : SV_Target
{
    return input.col;
}

// ------------------------------------------------------
// A shader that accepts Position and Texture
// Used as an overlay
// ------------------------------------------------------
struct OverlayVS_IN
{
    float4 pos : POSITION;
    float2 tex : TEXCOORD0;
};

struct OverlayPS_IN
{
    float4 pos : SV_POSITION;
    float2 tex : TEXCOORD0;
};

OverlayPS_IN OverlayVS(OverlayVS_IN input)
{
    OverlayPS_IN output = (OverlayPS_IN)0;
    output.pos = input.pos;
    output.tex = input.tex;
    return output;
}

float4 OverlayPS(OverlayPS_IN input) : SV_Target
{
    float4 color = g_Overlay.Sample(g_samLinear, input.tex);
    return color;
}

// ------------------------------------------------------
// Techniques
// ------------------------------------------------------
technique11 Color
{
    pass P0
    {
        SetGeometryShader(0);
        SetVertexShader(CompileShader(vs_4_0, ColorVS()));
        SetPixelShader(CompileShader(ps_4_0, ColorPS()));
    }
}

technique11 Overlay
{
    pass P0
    {
        SetGeometryShader(0);
        SetVertexShader(CompileShader(vs_4_0, OverlayVS()));
        SetPixelShader(CompileShader(ps_4_0, OverlayPS()));
    }
}
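(Note that the Color technique used to draw the triangle never samples g_Overlay, so the SetResource call in the C# snippet above only has an effect when the Overlay technique is active.)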
The above code has been taken from: SharedResources using SharpDX

Related

How to use MTLSamplerState instead of declaring a sampler in my fragment shader code?

I have the shader below where I define a sampler (constexpr sampler textureSampler (mag_filter::linear,min_filter::linear);).
using namespace metal;

struct ProjectedVertex {
    float4 position [[position]];
    float2 textureCoord;
};

fragment float4 fragmentShader(const ProjectedVertex in [[stage_in]],
                               const texture2d<float> colorTexture [[texture(0)]],
                               constant float4 &opacity [[buffer(1)]]) {
    constexpr sampler textureSampler(mag_filter::linear, min_filter::linear);
    const float4 colorSample = colorTexture.sample(textureSampler, in.textureCoord);
    return colorSample * opacity[0];
}
Now I would like to avoid hard-coding this sampler inside my shader code. I found MTLSamplerState, but I don't know how to use it.
To create a sampler, first create a MTLSamplerDescriptor object and configure the descriptor’s properties. Then call the newSamplerStateWithDescriptor: method on the MTLDevice object that will use this sampler. After you create the sampler, you can release the descriptor or reconfigure its properties to create other samplers.
// Create default sampler state
MTLSamplerDescriptor *samplerDesc = [MTLSamplerDescriptor new];
samplerDesc.rAddressMode = MTLSamplerAddressModeRepeat;
samplerDesc.sAddressMode = MTLSamplerAddressModeRepeat;
samplerDesc.tAddressMode = MTLSamplerAddressModeRepeat;
samplerDesc.minFilter = MTLSamplerMinMagFilterLinear;
samplerDesc.magFilter = MTLSamplerMinMagFilterLinear;
samplerDesc.mipFilter = MTLSamplerMipFilterNotMipmapped;
id<MTLSamplerState> ss = [device newSamplerStateWithDescriptor:samplerDesc];
Set the sampler state for the fragment function:
id<MTLRenderCommandEncoder> encoder = [commandBuffer renderCommandEncoderWithDescriptor: passDescriptor];
...
[encoder setFragmentSamplerState: ss atIndex:0];
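(The texture itself is still bound separately, e.g. with [encoder setFragmentTexture:texture atIndex:0], so that it appears at [[texture(0)]] in the function signature below.)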
Accessing from the shader:
fragment float4 albedoMainFragment(ImageColor in [[stage_in]],
                                   texture2d<float> diffuseTexture [[texture(0)]],
                                   sampler smp [[sampler(0)]]) {
    float4 color = diffuseTexture.sample(smp, in.texCoord);
    return color;
}
How to create a SamplerState
First, declare an MTLSamplerDescriptor and configure properties such as the address modes, magFilter, and minFilter.
Second, call the makeSamplerState method on an MTLDevice; in most cases this is the default device.
You can use the code below. I hope it helps.
private static func buildSamplerState() -> MTLSamplerState? {
    let descriptor = MTLSamplerDescriptor()
    descriptor.sAddressMode = .repeat // .clampToEdge, .mirrorRepeat, .clampToZero
    descriptor.tAddressMode = .repeat // .clampToEdge, .mirrorRepeat, .clampToZero
    descriptor.magFilter = .linear // .nearest
    descriptor.minFilter = .linear // .nearest
    let samplerState = MTLCreateSystemDefaultDevice()?.makeSamplerState(descriptor: descriptor)
    return samplerState
}
How to use it
...
let samplerState = buildSamplerState()
...
// call `makeRenderCommandEncoder` to create commandEncoder from commandBuffer
let commandEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor)
commandEncoder.setFragmentSamplerState(samplerState, index: 0)
In your fragment shader:
fragment float4 exampleShader(VertexIO inputFragment [[stage_in]],
                              sampler textureSampler [[sampler(0)]],
                              texture2d<float> inputTexture [[texture(0)]])
{
    float2 position = inputFragment.textureCoord;
    // ...
    return inputTexture.sample(textureSampler, position);
}
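In both variants, the index passed to setFragmentSamplerState must match the [[sampler(n)]] index declared in the fragment function's parameter list, just as the texture index must match [[texture(n)]].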

GPUImage2: passing gl texture to shader

I'm trying to modify the GPUImage2 CrosshairGenerator to replace the default crosshairs with a GL texture made from a UIImage; my image is a 128x128 PNG file.
I'm using GPUImage2's PictureInput to convert the UIImage to a GL texture, and it returns the PictureInput instance without any issues/warnings.
Next I'm modifying the CrosshairVertexShader to add support for the new texture:
uniform float crosshairWidth; // used below; assumed carried over from the stock GPUImage2 crosshair shader
uniform sampler2D inputImageTexture;

attribute vec4 position;

varying vec2 centerLocation;
varying float pointSpacing;

void main() {
    gl_Position = vec4(((position.xy * 2.0) - 1.0), 0.0, 1.0);
    gl_PointSize = crosshairWidth + 1.0;
    pointSpacing = 1.0 / crosshairWidth;
    centerLocation = vec2(
        pointSpacing * ceil(crosshairWidth / 2.0),
        pointSpacing * ceil(crosshairWidth / 2.0));
}
Then I pass it to the modified CrosshairFragmentShader and render on screen(?):
uniform sampler2D inputImageTexture;

varying lowp vec3 crosshairColor;
varying highp vec2 centerLocation;
varying highp float pointSpacing;

void main() {
    highp vec4 tex = texture2D(inputImageTexture, centerLocation);
    gl_FragColor = vec4(tex.r, tex.g, tex.b, tex.a);
}
Here is the code of the modified CrosshairGenerator:
import UIKit

public class CrosshairGenerator: ImageGenerator {
    public var crosshairWidth: Float = 5.0 { didSet { uniformSettings["crosshairWidth"] = crosshairWidth } }
    public var crosshairColor: Color = Color.green { didSet { uniformSettings["crosshairColor"] = crosshairColor } }
    public var crosshairImage: PictureInput = PictureInput(imageName: "monohrome.png") {
        didSet {
            uniformSettings["inputImageTexture"] = crosshairImage // Here I pass the texture to the shader?
            crosshairImage.processImage()
        }
    }

    let crosshairShader: ShaderProgram
    var uniformSettings = ShaderUniformSettings()

    public override init(size: Size) {
        crosshairShader = crashOnShaderCompileFailure("CrosshairGenerator") { try sharedImageProcessingContext.programForVertexShader(vertexShader, fragmentShader: fragmentShader) }
        super.init(size: size)
        ({ crosshairWidth = 5.0 })()
        ({ crosshairColor = Color.green })()
        ({ crosshairImage = PictureInput(imageName: "monohrome.png") })()
    }

    public func renderCrosshairs(_ positions: [Position]) {
        imageFramebuffer.activateFramebufferForRendering()
        imageFramebuffer.timingStyle = .stillImage
        #if GL
        glEnable(GLenum(GL_POINT_SPRITE))
        glEnable(GLenum(GL_VERTEX_PROGRAM_POINT_SIZE))
        #else
        glEnable(GLenum(GL_POINT_SPRITE_OES))
        #endif
        crosshairShader.use()
        uniformSettings.restoreShaderSettings(crosshairShader)
        clearFramebufferWithColor(Color.transparent)
        guard let positionAttribute = crosshairShader.attributeIndex("position") else { fatalError("A position attribute was missing from the shader program during rendering.") }
        let convertedPositions = positions.flatMap { $0.toGLArray() }
        glVertexAttribPointer(positionAttribute, 2, GLenum(GL_FLOAT), 0, 0, convertedPositions)
        glDrawArrays(GLenum(GL_POINTS), 0, GLsizei(positions.count))
        notifyTargets()
    }
}
The initialization of the corner detector remains the same:
FilterOperation(
    filter: { HarrisCornerDetector() },
    listName: "Harris corner detector",
    titleName: "Harris Corner Detector",
    sliderConfiguration: .enabled(minimumValue: 0.01, maximumValue: 0.70, initialValue: 0.20),
    sliderUpdateCallback: { (filter, sliderValue) in
        filter.threshold = sliderValue
    },
    filterOperationType: .custom(filterSetupFunction: { (camera, filter, outputView) in
        let castFilter = filter as! HarrisCornerDetector
        // TODO: Get this more dynamically sized
        #if os(iOS)
        let crosshairGenerator = CrosshairGenerator(size: Size(width: 480, height: 640))
        #else
        let crosshairGenerator = CrosshairGenerator(size: Size(width: 1280, height: 720))
        #endif
        crosshairGenerator.crosshairWidth = 30.0
        castFilter.cornersDetectedCallback = { corners in
            crosshairGenerator.renderCrosshairs(corners)
        }
        camera --> castFilter
        let blendFilter = AlphaBlend()
        camera --> blendFilter --> outputView
        crosshairGenerator --> blendFilter
        return blendFilter
    })
)
The code compiles and works fine, but the texture looks empty (coloured squares) when rendered on screen.
If I comment out the line uniformSettings["inputImageTexture"] = crosshairImage, the result remains the same, which makes me think the texture isn't passed to the shader at all.
What am I missing here?

DirectX 11 Blending

How can I access the color of the destination pixel in the pixel shader, in order to use my own blending equation? When control reaches the pixel shader I only have the source pixel's position and color; I want to know the color of the destination pixel at that time.
One approach I have heard of is using textures, but I have not been able to work out how.
Programmable blending is not available in DirectX 11, but with some hacks it is possible.
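For reference, the usual workaround in Direct3D 11 is to snapshot the destination into a second texture and sample that in the pixel shader, applying your own blend equation there. A minimal SharpDX sketch, where backBufferTex and sceneCopyTex are hypothetical Texture2D resources with identical descriptions (sceneCopyTex created with BindFlags.ShaderResource) and device/context are the usual device and immediate context:
// copy what has been rendered so far, then expose the copy to the pixel shader
context.CopyResource(backBufferTex, sceneCopyTex);
var destSrv = new ShaderResourceView(device, sceneCopyTex);
context.PixelShader.SetShaderResource(0, destSrv);
// the pixel shader can now read the "destination" color at its own SV_Position
// and combine it with the source color using any equation it likes
The copy is a point-in-time snapshot, so geometry drawn after the copy will not see intermediate results; overlapping draws need the snapshot refreshed in between. The Direct3D 12 code below takes a different route and simply selects one of the fixed-function blend equations in the pipeline state.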
void D3D12HelloTriangle::LoadPipeline()
{
    UINT dxgiFactoryFlags = 0;
    ComPtr<IDXGIFactory4> factory;
    ThrowIfFailed(CreateDXGIFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory)));

    // Device creation.
    {
        ComPtr<IDXGIAdapter1> hardwareAdapter;
        GetHardwareAdapter(factory.Get(), &hardwareAdapter);
        ThrowIfFailed(D3D12CreateDevice(
            hardwareAdapter.Get(),
            D3D_FEATURE_LEVEL_11_0,
            IID_PPV_ARGS(&m_device)
        ));
    }

    // Describe and create the command queue.
    D3D12_COMMAND_QUEUE_DESC queueDesc = {};
    queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
    queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
    ThrowIfFailed(m_device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&m_commandQueue)));

    // Describe and create the swap chain.
    DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
    swapChainDesc.BufferCount = FrameCount;
    swapChainDesc.Width = m_width;
    swapChainDesc.Height = m_height;
    swapChainDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
    swapChainDesc.SampleDesc.Count = 1;
    ComPtr<IDXGISwapChain1> swapChain;
    ThrowIfFailed(factory->CreateSwapChainForCoreWindow(
        m_commandQueue.Get(), // Swap chain needs the queue so that it can force a flush on it.
        reinterpret_cast<IUnknown*>(Windows::UI::Core::CoreWindow::GetForCurrentThread()),
        &swapChainDesc,
        nullptr,
        &swapChain
    ));
    ThrowIfFailed(swapChain.As(&m_swapChain));
    m_frameIndex = m_swapChain->GetCurrentBackBufferIndex();

    // Create descriptor heaps.
    {
        // Describe and create a render target view (RTV) descriptor heap.
        D3D12_DESCRIPTOR_HEAP_DESC rtvHeapDesc = {};
        rtvHeapDesc.NumDescriptors = FrameCount;
        rtvHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV;
        rtvHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
        ThrowIfFailed(m_device->CreateDescriptorHeap(&rtvHeapDesc, IID_PPV_ARGS(&m_rtvHeap)));
        m_rtvDescriptorSize = m_device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
    }

    // Create frame resources.
    {
        CD3DX12_CPU_DESCRIPTOR_HANDLE rtvHandle(m_rtvHeap->GetCPUDescriptorHandleForHeapStart());
        // Create a RTV for each frame.
        for (UINT n = 0; n < FrameCount; n++)
        {
            ThrowIfFailed(m_swapChain->GetBuffer(n, IID_PPV_ARGS(&m_renderTargets[n])));
            m_device->CreateRenderTargetView(m_renderTargets[n].Get(), nullptr, rtvHandle);
            rtvHandle.Offset(1, m_rtvDescriptorSize);
        }
    }

    ThrowIfFailed(m_device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(&m_commandAllocator)));
}
// Load the sample assets.
void D3D12HelloTriangle::LoadAssets()
{
    // Create an empty root signature.
    {
        CD3DX12_ROOT_SIGNATURE_DESC rootSignatureDesc;
        rootSignatureDesc.Init(0, nullptr, 0, nullptr, D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT);
        ComPtr<ID3DBlob> signature;
        ComPtr<ID3DBlob> error;
        ThrowIfFailed(D3D12SerializeRootSignature(&rootSignatureDesc, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error));
        ThrowIfFailed(m_device->CreateRootSignature(0, signature->GetBufferPointer(), signature->GetBufferSize(), IID_PPV_ARGS(&m_rootSignature)));
    }

    // Create the pipeline state, which includes compiling and loading shaders.
    {
        ComPtr<ID3DBlob> vertexShader;
        ComPtr<ID3DBlob> pixelShader;
#if defined(_DEBUG)
        // Enable better shader debugging with the graphics debugging tools.
        UINT compileFlags = D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
#else
        UINT compileFlags = 0;
#endif
        ThrowIfFailed(D3DCompileFromFile(GetAssetFullPath(L"VertexShader.hlsl").c_str(), nullptr, nullptr, "VSMain", "vs_5_0", compileFlags, 0, &vertexShader, nullptr));
        ThrowIfFailed(D3DCompileFromFile(GetAssetFullPath(L"PixelShader.hlsl").c_str(), nullptr, nullptr, "PSMain", "ps_5_0", compileFlags, 0, &pixelShader, nullptr));

        // Define the vertex input layout.
        D3D12_INPUT_ELEMENT_DESC inputElementDescs[] =
        {
            { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
            { "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
            { "DELAY", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 },
        };

        // Describe and create the graphics pipeline state object (PSO).
        D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {};
        psoDesc.InputLayout = { inputElementDescs, _countof(inputElementDescs) };
        psoDesc.pRootSignature = m_rootSignature.Get();
        psoDesc.VS = CD3DX12_SHADER_BYTECODE(vertexShader.Get());
        psoDesc.PS = CD3DX12_SHADER_BYTECODE(pixelShader.Get());
        psoDesc.RasterizerState = CD3DX12_RASTERIZER_DESC(D3D12_DEFAULT);
        psoDesc.BlendState = CD3DX12_BLEND_DESC(D3D12_DEFAULT);
        psoDesc.DepthStencilState.DepthEnable = FALSE;
        psoDesc.DepthStencilState.StencilEnable = FALSE;
        psoDesc.SampleMask = 1;
        psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
        psoDesc.NumRenderTargets = 2;
        psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM;
        psoDesc.RTVFormats[1] = DXGI_FORMAT_R8G8B8A8_UNORM;
        //psoDesc.RTVFormats[2] = DXGI_FORMAT_R8G8B8A8_UNORM;
        psoDesc.SampleDesc.Count = 1;
        psoDesc.RasterizerState.CullMode = D3D12_CULL_MODE_NONE;
        //ThrowIfFailed(m_device->CreateGraphicsPipelineState(&psoDesc, IID_PPV_ARGS(&m_pipelineState)));

        D3D12_GRAPHICS_PIPELINE_STATE_DESC transparentPsoDesc = psoDesc;
        D3D12_RENDER_TARGET_BLEND_DESC transparencyBlendDesc;
        transparencyBlendDesc.BlendEnable = true;
        transparencyBlendDesc.LogicOpEnable = false;
        transparencyBlendDesc.SrcBlend = D3D12_BLEND_ONE;
        transparencyBlendDesc.DestBlend = D3D12_BLEND_ONE;
        transparencyBlendDesc.BlendOp = D3D12_BLEND_OP_MAX;
        transparencyBlendDesc.SrcBlendAlpha = D3D12_BLEND_ONE;
        transparencyBlendDesc.DestBlendAlpha = D3D12_BLEND_ONE;
        transparencyBlendDesc.BlendOpAlpha = D3D12_BLEND_OP_MAX;
        transparencyBlendDesc.LogicOp = D3D12_LOGIC_OP_NOOP;
        transparencyBlendDesc.RenderTargetWriteMask = D3D12_COLOR_WRITE_ENABLE_ALL;
        transparentPsoDesc.BlendState.RenderTarget[0] = transparencyBlendDesc;
        ThrowIfFailed(m_device->CreateGraphicsPipelineState(&transparentPsoDesc, IID_PPV_ARGS(&m_pipelineState)));
        ThrowIfFailed(m_device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, m_commandAllocator.Get(), m_pipelineState.Get(), IID_PPV_ARGS(&m_commandList)));
    }

    // Command lists are created in the recording state, but there is nothing
    // to record yet. The main loop expects it to be closed, so close it now.
    ThrowIfFailed(m_commandList->Close());

    // Create the vertex buffer.
    {
        // Define the geometry for a triangle.
        Vertex triangleVertices[] =
        {
            { { 0.0f, 0.0f * m_aspectRatio, 0.0f }, { 0.0f, 0.0f, 1.0f, 1.0f }, { 0.0f, 0.0f, 0.0f, 0.0f } },
            { { 0.25f, 0.0f * m_aspectRatio, 0.0f }, { 0.0f, 0.0f, 1.0f, 1.0f }, { 0.0f, 0.0f, 0.0f, 0.0f } },
            { { 0.0f, 0.25f * m_aspectRatio, 0.0f }, { 0.0f, 0.0f, 1.0f, 1.0f }, { 0.0f, 0.0f, 0.0f, 0.0f } },
            { { 0.25f, 0.25f * m_aspectRatio, 0.0f }, { 0.0f, 0.0f, 1.0f, 1.0f }, { 0.0f, 0.0f, 0.0f, 0.0f } },
        };

        // Cube vertices. Each vertex has a position and a color.
        Vertex triangleVertices2[] =
        {
            { { 0.0f, 0.0f * m_aspectRatio, 0.0f }, { 0.0f, 1.0f, 0.0f, 1.0f }, { 0.0f, 0.0f, 0.0f, 1.0f } },
            { { 0.5f, 0.0f * m_aspectRatio, 0.0f }, { 0.0f, 1.0f, 0.0f, 1.0f }, { 0.0f, 0.0f, 0.0f, 1.0f } },
            { { 0.0f, 0.5f * m_aspectRatio, 0.0f }, { 0.0f, 1.0f, 0.0f, 1.0f }, { 0.0f, 0.0f, 0.0f, 1.0f } },
            { { 0.5f, 0.5f * m_aspectRatio, 0.0f }, { 0.0f, 1.0f, 0.0f, 1.0f }, { 0.0f, 0.0f, 0.0f, 1.0f } },
        };

        const UINT vertexBufferSize = sizeof(triangleVertices);
        const UINT my_vertexBufferSize = sizeof(triangleVertices2);

        // Note: using upload heaps to transfer static data like vert buffers is not
        // recommended. Every time the GPU needs it, the upload heap will be marshalled
        // over. Please read up on Default Heap usage. An upload heap is used here for
        // code simplicity and because there are very few verts to actually transfer.
        ThrowIfFailed(m_device->CreateCommittedResource(
            &CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD),
            D3D12_HEAP_FLAG_NONE,
            &CD3DX12_RESOURCE_DESC::Buffer(vertexBufferSize),
            D3D12_RESOURCE_STATE_GENERIC_READ,
            nullptr,
            IID_PPV_ARGS(&m_vertexBuffer)));
        ThrowIfFailed(m_device->CreateCommittedResource(
            &CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD),
            D3D12_HEAP_FLAG_NONE,
            &CD3DX12_RESOURCE_DESC::Buffer(my_vertexBufferSize),
            D3D12_RESOURCE_STATE_GENERIC_READ,
            nullptr,
            IID_PPV_ARGS(&my_vertexBuffer)));

        // Copy the triangle data to the vertex buffers.
        UINT8* pVertexDataBegin;
        CD3DX12_RANGE readRange(0, 0); // We do not intend to read from this resource on the CPU.
        ThrowIfFailed(m_vertexBuffer->Map(0, &readRange, reinterpret_cast<void**>(&pVertexDataBegin)));
        UINT8* my_pVertexDataBegin;
        CD3DX12_RANGE my_readRange(0, 0); // We do not intend to read from this resource on the CPU.
        ThrowIfFailed(my_vertexBuffer->Map(0, &my_readRange, reinterpret_cast<void**>(&my_pVertexDataBegin)));
        memcpy(pVertexDataBegin, triangleVertices, sizeof(triangleVertices));
        m_vertexBuffer->Unmap(0, nullptr);
        memcpy(my_pVertexDataBegin, triangleVertices2, sizeof(triangleVertices2));
        my_vertexBuffer->Unmap(0, nullptr);

        // Initialize the vertex buffer views.
        m_vertexBufferView[0].BufferLocation = m_vertexBuffer->GetGPUVirtualAddress();
        m_vertexBufferView[0].StrideInBytes = sizeof(Vertex);
        m_vertexBufferView[0].SizeInBytes = vertexBufferSize;
        m_vertexBufferView[1].BufferLocation = my_vertexBuffer->GetGPUVirtualAddress();
        m_vertexBufferView[1].StrideInBytes = sizeof(Vertex);
        m_vertexBufferView[1].SizeInBytes = my_vertexBufferSize;
    }

    // Create synchronization objects and wait until assets have been uploaded to the GPU.
    {
        ThrowIfFailed(m_device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&m_fence)));
        m_fenceValue = 1;
    }
}
// Update frame-based values.
void D3D12HelloTriangle::OnUpdate()
{
}

// Render the scene.
void D3D12HelloTriangle::OnRender()
{
    // Record all the commands we need to render the scene into the command list.
    PopulateCommandList();

    // Execute the command list.
    ID3D12CommandList* ppCommandLists[] = { m_commandList.Get() };
    m_commandQueue->ExecuteCommandLists(_countof(ppCommandLists), ppCommandLists);

    // Present the frame.
    ThrowIfFailed(m_swapChain->Present(1, 0));
    WaitForPreviousFrame();
}

void D3D12HelloTriangle::OnDestroy()
{
    // Ensure that the GPU is no longer referencing resources that are about to be
    // cleaned up by the destructor.
    WaitForPreviousFrame();
}

void D3D12HelloTriangle::PopulateCommandList()
{
    // Command list allocators can only be reset when the associated
    // command lists have finished execution on the GPU; apps should use
    // fences to determine GPU execution progress.
    ThrowIfFailed(m_commandAllocator->Reset());

    // However, when ExecuteCommandList() is called on a particular command
    // list, that command list can then be reset at any time and must be before
    // re-recording.
    ThrowIfFailed(m_commandList->Reset(m_commandAllocator.Get(), m_pipelineState.Get()));

    // Set necessary state.
    m_commandList->SetGraphicsRootSignature(m_rootSignature.Get());
    m_commandList->RSSetViewports(1, &m_viewport);
    m_commandList->RSSetScissorRects(1, &m_scissorRect);

    // Indicate that the back buffer will be used as a render target.
    //m_commandList->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition(m_renderTargets[m_frameIndex].Get(), D3D12_RESOURCE_STATE_PRESENT, D3D12_RESOURCE_STATE_RENDER_TARGET));
    CD3DX12_CPU_DESCRIPTOR_HANDLE rtvHandle(m_rtvHeap->GetCPUDescriptorHandleForHeapStart(), m_frameIndex, m_rtvDescriptorSize);
    m_commandList->OMSetRenderTargets(1, &rtvHandle, FALSE, nullptr);

    // Record commands.
    const float clearColor[] = { 1.0f, 0.0f, 0.0f, 1.0f };
    m_commandList->ClearRenderTargetView(rtvHandle, clearColor, 0, nullptr);
    m_commandList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
    m_commandList->IASetVertexBuffers(0, 1, &m_vertexBufferView[1]);
    m_commandList->DrawInstanced(4, 1, 0, 0);
    m_commandList->IASetVertexBuffers(0, 1, m_vertexBufferView);
    m_commandList->DrawInstanced(4, 1, 0, 0);

    // Indicate that the back buffer will now be used to present.
    m_commandList->ResourceBarrier(1, &CD3DX12_RESOURCE_BARRIER::Transition(m_renderTargets[m_frameIndex].Get(), D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_PRESENT));
    ThrowIfFailed(m_commandList->Close());
}

void D3D12HelloTriangle::WaitForPreviousFrame()
{
    // WAITING FOR THE FRAME TO COMPLETE BEFORE CONTINUING IS NOT BEST PRACTICE.
    // This is code implemented as such for simplicity. The D3D12HelloFrameBuffering
    // sample illustrates how to use fences for efficient resource usage and to
    // maximize GPU utilization.

    // Signal and increment the fence value.
    const UINT64 fence = m_fenceValue;
    ThrowIfFailed(m_commandQueue->Signal(m_fence.Get(), fence));
    m_fenceValue++;
    m_frameIndex = m_swapChain->GetCurrentBackBufferIndex();
}

getAttribLocation always returns the same index

attribute vec2 test;
attribute vec2 position;

void main() {
    vTexCoord = position;
    vec2 gg = test;
    .....
}
What does getAttribLocation stand for? I used to believe it was the index of the attribute in the code, but no, it always returns 0 for position and 1 for test?
getAttribLocation gets the location of the attribute. Just because your GPU/driver returns 0 for position and 1 for test doesn't mean that all drivers will.
Also, while debugging, it's common to comment out parts of a shader. If an attribute is not used, the driver may optimize it away. In your case, if you were to comment out whatever lines use position, it's likely test would get location 0. If you weren't looking up the location and assumed test was always at location 1, your code would fail.
On the other hand, you can set the location before you call linkProgram by calling bindAttribLocation. For example:
gl.bindAttribLocation(program, 10, "position");
gl.bindAttribLocation(program, 5, "test");
gl.linkProgram(program);
In which case you don't have to look up the locations.
var vs = `
attribute float position;
attribute float test;

void main() {
  gl_Position = vec4(position, test, 0, 1);
}
`;
var fs = `
void main() {
  gl_FragColor = vec4(0, 1, 0, 1);
}
`;

function createShader(gl, type, source) {
  var s = gl.createShader(type);
  gl.shaderSource(s, source);
  gl.compileShader(s);
  if (!gl.getShaderParameter(s, gl.COMPILE_STATUS)) {
    console.log(gl.getShaderInfoLog(s));
  }
  return s;
}

var gl = document.createElement("canvas").getContext("webgl");
var prg = gl.createProgram();
gl.attachShader(prg, createShader(gl, gl.VERTEX_SHADER, vs));
gl.attachShader(prg, createShader(gl, gl.FRAGMENT_SHADER, fs));
gl.bindAttribLocation(prg, 5, "position");
gl.bindAttribLocation(prg, 10, "test");
gl.linkProgram(prg);
if (!gl.getProgramParameter(prg, gl.LINK_STATUS)) {
  console.log(gl.getProgramInfoLog(prg));
}
console.log("test location:", gl.getAttribLocation(prg, "test"));
console.log("position location:", gl.getAttribLocation(prg, "position"));

"Incorrect parameter" error in DrawUserPrimitives call

I'm getting an unhandled exception with the message "HRESULT: [0x80070057], Module: [General], ApiCode: [E_INVALIDARG/Invalid Arguments], Message: The parameter is incorrect." at the call to DrawUserPrimitives in the code below:
namespace Game1

open Microsoft.Xna.Framework
open Microsoft.Xna.Framework.Graphics
open System.IO
open System.Reflection

type Game1() as this =
    inherit Game()
    let graphics = new GraphicsDeviceManager(this)
    [<DefaultValue>] val mutable effect : Effect
    [<DefaultValue>] val mutable vertices : VertexPositionColor[]
    do base.Content.RootDirectory <- "Content"

    override this.Initialize() =
        base.Initialize()
        let device = base.GraphicsDevice
        let s = Assembly.GetExecutingAssembly().GetManifestResourceStream("effects.mgfxo")
        let reader = new BinaryReader(s)
        this.effect <- new Effect(device, reader.ReadBytes((int)reader.BaseStream.Length))
        ()

    override this.LoadContent() =
        this.vertices <-
            [|
                VertexPositionColor(Vector3(-0.5f, -0.5f, 0.0f), Color.Red);
                VertexPositionColor(Vector3(0.0f, 0.5f, 0.0f), Color.Green);
                VertexPositionColor(Vector3(0.5f, -0.5f, 0.0f), Color.Yellow)
            |]

    override this.Draw(gameTime) =
        let device = base.GraphicsDevice
        do device.Clear(Color.CornflowerBlue)
        this.effect.CurrentTechnique <- this.effect.Techniques.["Pretransformed"]
        this.effect.CurrentTechnique.Passes |> Seq.iter
            (fun pass ->
                pass.Apply()
                device.DrawUserPrimitives<VertexPositionColor>(PrimitiveType.TriangleList, this.vertices, 0, 1)
            )
        do base.Draw(gameTime)
My effect code is as follows (taken from the excellent Riemer's tutorials) and is as simple as can be. It's being converted as in this answer, and that seems to be working because I can see the effect name if I put a breakpoint in before the draw call.
struct VertexToPixel
{
    float4 Position : POSITION;
    float4 Color : COLOR0;
    float LightingFactor : TEXCOORD0;
    float2 TextureCoords : TEXCOORD1;
};

struct PixelToFrame
{
    float4 Color : COLOR0;
};

VertexToPixel PretransformedVS(float4 inPos : POSITION, float4 inColor : COLOR)
{
    VertexToPixel Output = (VertexToPixel)0;
    Output.Position = inPos;
    Output.Color = inColor;
    return Output;
}

PixelToFrame PretransformedPS(VertexToPixel PSIn)
{
    PixelToFrame Output = (PixelToFrame)0;
    Output.Color = PSIn.Color;
    return Output;
}

technique Pretransformed
{
    pass Pass0
    {
        VertexShader = compile vs_4_0 PretransformedVS();
        PixelShader = compile ps_4_0 PretransformedPS();
    }
}
It works fine if I replace the custom effect with a BasicEffect as per this example.
I'm using Monogame 3.2 and Visual Studio 2013.
I eventually worked out (with help from this forum thread) that I just needed to replace POSITION with SV_POSITION in the effect file (i.e. float4 Position : SV_POSITION; in the VertexToPixel struct). This is a consequence of MonoGame being built on DirectX 10/11 rather than XNA's DirectX 9.
