ID3D11DeviceContext::DrawIndexed() failed - DirectX

My program is a DirectX program that draws a container cube with smaller cubes inside it; these smaller cubes fall over time. I hope you understand what I mean.
The program isn't complete yet. It should draw the container only, but it draws nothing; only the background color is visible. I have only included what I think is needed.
These are the routines that initialize the program:
bool Game::init(HINSTANCE hinst, HWND _hw) {
    Directx11::init(hinst, _hw);
    return LoadContent();
}
Directx11::init()
bool Directx11::init(HINSTANCE hinst, HWND hw) {
    _hinst = hinst;
    _hwnd = hw;
    RECT rc;
    GetClientRect(_hwnd, &rc);
    height = rc.bottom - rc.top;
    width  = rc.right - rc.left;
    UINT flags = 0;
#ifdef _DEBUG
    flags |= D3D11_CREATE_DEVICE_DEBUG;
#endif
    HR(D3D11CreateDevice(0, _driverType, 0, flags, 0, 0, D3D11_SDK_VERSION,
                         &d3dDevice, &_featureLevel, &d3dDeviceContext));
    if (d3dDevice == 0 || d3dDeviceContext == 0)
        return false;
    DXGI_SWAP_CHAIN_DESC sdesc;
    ZeroMemory(&sdesc, sizeof(DXGI_SWAP_CHAIN_DESC));
    sdesc.Windowed = true;
    sdesc.BufferCount = 1;
    sdesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    sdesc.BufferDesc.Height = height;
    sdesc.BufferDesc.Width = width;
    sdesc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
    sdesc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
    sdesc.OutputWindow = _hwnd;
    sdesc.BufferDesc.RefreshRate.Denominator = 1;
    sdesc.BufferDesc.RefreshRate.Numerator = 60;
    sdesc.Flags = 0;
    sdesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    if (m4xMsaaEnable) {
        sdesc.SampleDesc.Count = 4;
        sdesc.SampleDesc.Quality = m4xMsaaQuality - 1;
    } else {
        sdesc.SampleDesc.Count = 1;
        sdesc.SampleDesc.Quality = 0;
    }
    // the swap chain must be created through the factory that created the device
    IDXGIDevice *Device = 0;
    HR(d3dDevice->QueryInterface(__uuidof(IDXGIDevice), reinterpret_cast<void**>(&Device)));
    IDXGIAdapter *Ad = 0;
    HR(Device->GetParent(__uuidof(IDXGIAdapter), reinterpret_cast<void**>(&Ad)));
    IDXGIFactory *fac = 0;
    HR(Ad->GetParent(__uuidof(IDXGIFactory), reinterpret_cast<void**>(&fac)));
    HR(fac->CreateSwapChain(d3dDevice, &sdesc, &swapchain));
    ReleaseCOM(Device);
    ReleaseCOM(Ad);
    ReleaseCOM(fac);
    ID3D11Texture2D *back = 0;
    HR(swapchain->GetBuffer(0, __uuidof(ID3D11Texture2D), reinterpret_cast<void**>(&back)));
    HR(d3dDevice->CreateRenderTargetView(back, 0, &RenderTarget));
    D3D11_TEXTURE2D_DESC Tdesc;
    ZeroMemory(&Tdesc, sizeof(D3D11_TEXTURE2D_DESC));
    Tdesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
    Tdesc.ArraySize = 1;
    Tdesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
    Tdesc.Height = height;
    Tdesc.Width = width;
    Tdesc.Usage = D3D11_USAGE_DEFAULT;
    Tdesc.MipLevels = 1;
    if (m4xMsaaEnable) {
        Tdesc.SampleDesc.Count = 4;
        Tdesc.SampleDesc.Quality = m4xMsaaQuality - 1;
    } else {
        Tdesc.SampleDesc.Count = 1;
        Tdesc.SampleDesc.Quality = 0;
    }
    HR(d3dDevice->CreateTexture2D(&Tdesc, 0, &depthview));
    HR(d3dDevice->CreateDepthStencilView(depthview, 0, &depth));
    d3dDeviceContext->OMSetRenderTargets(1, &RenderTarget, depth);
    D3D11_VIEWPORT vp;
    vp.TopLeftX = 0.0f;
    vp.TopLeftY = 0.0f;
    vp.Width  = static_cast<float>(width);
    vp.Height = static_cast<float>(height);
    vp.MinDepth = 0.0f;
    vp.MaxDepth = 1.0f;
    d3dDeviceContext->RSSetViewports(1, &vp);
    return true;
}
SetBuild() prepares the matrices inside the container for the smaller cubes; I haven't programmed it to draw the smaller cubes yet.
And this is the function that draws the scene:
void Game::Render() {
    d3dDeviceContext->ClearRenderTargetView(RenderTarget, reinterpret_cast<const float*>(&Colors::LightSteelBlue));
    d3dDeviceContext->ClearDepthStencilView(depth, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);
    d3dDeviceContext->IASetInputLayout(_layout);
    d3dDeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
    d3dDeviceContext->IASetIndexBuffer(indices, DXGI_FORMAT_R32_UINT, 0);
    UINT strides = sizeof(Vertex), off = 0;
    d3dDeviceContext->IASetVertexBuffers(0, 1, &vertices, &strides, &off);
    D3DX11_TECHNIQUE_DESC des;
    Tech->GetDesc(&des);
    Floor *Lookup; // used to look up inside the matrices structure (Floor contains XMMATRIX Pieces[9])
    std::vector<XMFLOAT4X4> filled; // saves the matrices of the smaller cubes
    XMMATRIX V = XMLoadFloat4x4(&View), P = XMLoadFloat4x4(&Proj);
    XMMATRIX vp = V * P;
    XMMATRIX wvp;
    for (UINT i = 0; i < des.Passes; i++)
    {
        d3dDeviceContext->RSSetState(BuildRast);
        wvp = XMLoadFloat4x4(&(B.Memory[0].Pieces[0])) * vp; // loading the matrix at translation (0,0,0)
        HR(ShadeMat->SetMatrix(reinterpret_cast<float*>(&wvp)));
        HR(Tech->GetPassByIndex(i)->Apply(0, d3dDeviceContext));
        d3dDeviceContext->DrawIndexed(build_ind_count, build_ind_index, build_vers_index);
        d3dDeviceContext->RSSetState(PieseRast);
        UINT r1 = B.GetSize(), r2 = filled.size();
        for (UINT j = 0; j < r1; j++)
        {
            Lookup = &B.Memory[j];
            for (UINT r = 0; r < Lookup->filledindeces.size(); r++)
            {
                filled.push_back(Lookup->Pieces[Lookup->filledindeces[r]]);
            }
        }
        for (UINT j = 0; j < r2; j++)
        {
            ShadeMat->SetMatrix(reinterpret_cast<const float*>(&filled[i]));
            Tech->GetPassByIndex(i)->Apply(0, d3dDeviceContext);
            d3dDeviceContext->DrawIndexed(piese_ind_count, piese_ind_index, piese_vers_index);
        }
    }
    HR(swapchain->Present(0, 0));
}
Thanks in advance.

One bug in your program appears to be that you're using i, the index of the current pass, as an index into the filled vector, when you should apparently be using j.
Another apparent bug is that in the loop that is supposed to iterate over the elements of filled, you don't iterate over all of them: the value r2 is set to the size of filled before you append anything to it during that pass. During the first pass this means that nothing will be drawn by this loop, and if your technique has only one pass, the second DrawIndexed call in your code will never execute.
It also appears you should add matrices to filled only once, regardless of the number of passes the technique has. Consider whether your code is actually meant to work with techniques that have multiple passes.
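Putting those fixes together, a sketch of the corrected structure could look like the following (this reuses the names from your Render function as-is and leaves the container drawing unchanged; it is an outline, not a drop-in replacement):
// build `filled` once, before iterating over the technique's passes
for (UINT j = 0; j < B.GetSize(); j++)
{
    Floor *lookup = &B.Memory[j];
    for (UINT r = 0; r < lookup->filledindeces.size(); r++)
        filled.push_back(lookup->Pieces[lookup->filledindeces[r]]);
}
for (UINT i = 0; i < des.Passes; i++)
{
    // ... draw the container as before ...
    d3dDeviceContext->RSSetState(PieseRast);
    for (UINT j = 0; j < filled.size(); j++) // index with j, not i
    {
        ShadeMat->SetMatrix(reinterpret_cast<const float*>(&filled[j]));
        Tech->GetPassByIndex(i)->Apply(0, d3dDeviceContext);
        d3dDeviceContext->DrawIndexed(piese_ind_count, piese_ind_index, piese_vers_index);
    }
}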

Related

How to recreate a boundary from an array of junctions in ImageJ - Fiji?

I am detecting the boundary of a sandpile, storing it in an array, and then saving it as a text file for later use. The way I stored the boundary as a text file was to use the wand tool, get the properties of the selection (which gives me a table), convert the table to an array, and finally store it as a text file.
After doing so I noticed that the table mentioned above only has the (X, Y) coordinates of the "junctions" on the boundary, not every pixel of the boundary.
By later use I mean that I want to smooth out the boundary with various methods and redraw it, but I am stuck on how to go from junctions to the complete boundary. Below is my attempt to do it, but I am seeing heavy RAM usage just after the output is printed.
Thanks for any help and your time.
X = newArray(1, 5, 5, 10);
Y = newArray(3, 3, 8, 8);
c = newArray("c1", "c2");
// answer should be g = (1,2,3,4,5,5,5,5,5,5,6,7,8,9,10,  -- x part
//                       3,3,3,3,3,4,5,6,7,8,8,8,8,8,8)   -- y part
//f = slide(a, 2, c); Array.print(f);
g = cmpltarray(X, Y);
Array.print(g);

function cmpltarray(Tx, Ty) {
    for (i = 0; i < Tx.length - 1; i++) {
        if (Tx[i] == Tx[i+1] && Ty[i] != Ty[i+1]) {
            // vertical segment: insert the missing y values
            l = abs(Ty[i] - Ty[i+1]) - 1;
            tempy = newArray(l);
            tempx = newArray(l);
            for (j = 0; j < l; j++) {
                tempy[j] = Ty[i] + j + 1;
                tempx[j] = Tx[i];
            }
            Tx = slide(Tx, (i+1), tempx);
            Ty = slide(Ty, (i+1), tempy);
            i = i + l;
        }
        if (Ty[i] == Ty[i+1] && Tx[i] != Tx[i+1]) {
            // horizontal segment: insert the missing x values
            l = abs(Tx[i] - Tx[i+1]) - 1;
            tempy = newArray(l);
            tempx = newArray(l);
            for (j = 0; j < l; j++) {
                tempx[j] = Tx[i] + j + 1;
                tempy[j] = Ty[i];
            }
            Tx = slide(Tx, (i+1), tempx);
            Ty = slide(Ty, (i+1), tempy);
            i = i + l;
        }
    }
    return Array.concat(Tx, Ty);
}

function slide(array, n, data) {
    array = Array.concat(Array.slice(array, 0, n), data, Array.slice(array, n, array.length));
    return array;
}
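To make the intended expansion concrete, here is the same junction-to-boundary logic as a standalone C++ sketch (not an ImageJ macro). It assumes consecutive junctions always differ along exactly one axis, and it reproduces the expected g array from the comment above:
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> X = {1, 5, 5, 10}, Y = {3, 3, 8, 8};
    std::vector<int> gx, gy;
    for (size_t i = 0; i + 1 < X.size(); ++i) {
        // unit step toward the next junction (one of dx, dy is zero here)
        int dx = (X[i+1] > X[i]) - (X[i+1] < X[i]);
        int dy = (Y[i+1] > Y[i]) - (Y[i+1] < Y[i]);
        int x = X[i], y = Y[i];
        while (x != X[i+1] || y != Y[i+1]) {  // emit every pixel up to,
            gx.push_back(x);                  // but not including, the next
            gy.push_back(y);                  // junction (it starts the next segment)
            x += dx;
            y += dy;
        }
    }
    gx.push_back(X.back());  // the final junction closes the boundary
    gy.push_back(Y.back());
    for (size_t i = 0; i < gx.size(); ++i)
        printf("(%d,%d) ", gx[i], gy[i]);
    printf("\n");
}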

Fast implementation of BWT in Lua

local function fShallowCopy(tData)
    local tOutput = {}
    for k, v in ipairs(tData) do
        tOutput[k] = v
    end
    return tOutput
end

local function fLexTblSort(tA, tB) --lexicographic comparator for tables
    for i = 1, #tA do
        if tA[i] ~= tB[i] then
            return tA[i] < tB[i]
        end
    end
    return false
end

function fBWT(tData)
    --setup--
    local iSize = #tData
    local tSolution = {}
    local tSolved = {}
    --key table: build all rotations of tData--
    for n = 1, iSize do
        tData[iSize] = table.remove(tData, 1) --rotate left by one
        tSolution[n] = fShallowCopy(tData)
    end
    table.sort(tSolution, fLexTblSort)
    --encode output: take the last column of the sorted rotations--
    for i = 1, iSize do
        tSolved[i] = tSolution[i][iSize]
    end
    --finalize: find the row that holds the original data--
    for i = 1, iSize do
        if fIsEqual(tSolution[i], tData) then --fIsEqual: elementwise table comparison (not shown here)
            return i, tSolved
        end
    end
    return false
end
Above is my current code for achieving BWT encoding in Lua. The issue is that, because of the size of the tables and the lengths of the loops, it takes a long time to run: for a 1000-character input the average encoding time is about 1.15 seconds. Does anyone have suggestions for making a faster BWT encoding function?
The biggest slowdowns appear to be in fLexTblSort and fShallowCopy. I have included both above the BWT function as well.
If I see it right, your algorithm has complexity O(n^2 log n) if the sort is quicksort, since the comparator function fLexTblSort itself takes O(n) for each pair of values compared.
Checking against my implementation from a few years back, I see room for improvement. You create all the possible rotations of tData, which also takes a lot of time; I used only a single data block and stored just the starting positions of the particular rotations. You also use a lot of loops that could be collapsed into fewer.
My implementation was in C, but the concept can be used in Lua as well. Here is the idea, in a hybrid pseudocode somewhere between your Lua and C:
function fBWT(tData)
    local n = #tData
    local tSolution = {}
    local tSolved = {}
    local I
    for(i = 0; i < n; i++)
        tSolution[i] = i;                          -- store rotation start offsets only
    --table.sort(tSolution, fLexTblSort)
    quicksort(tData, n, tSolution, 0, n)
    for(i = 0; i < n; i++){
        tSolved[i] = tData[(tSolution[i]+n-1)%n];  -- last column of the sorted rotations
        if( tSolution[i] == 0 )
            I = i;                                 -- row containing the original string
    }
    return I, tSolved
end
You will also need your own sort function, because the standard one does not offer enough flexibility for this magic. Quicksort is a good idea (you might avoid some of the arguments; I pasted the C version I was using):
void swap(int array[], int left, int right){
    int tmp = array[right];
    array[right] = array[left];
    array[left] = tmp;
}

void quicksort(uint8_t data[], int length, int array[], int left, int right){
    if(left < right){
        int boundary = left;
        for(int i = left + 1; i < right; i++){
            if( offset_compare(data, length, array, i, left) < 0 ){
                swap(array, i, ++boundary);
            }
        }
        swap(array, left, boundary);
        quicksort(data, length, array, left, boundary);
        quicksort(data, length, array, boundary + 1, right);
    }
}
The last step is your own comparator function (similar to your original, but working on the rotations; again in C):
/**
 * compare one string (fixed length) with different rotations.
 */
int offset_compare(uint8_t *data, int length, int *array, int first, int second){
    int res;
    for(int i = 0; i < length; i++){
        res = data[(array[first]+i)%length] - data[(array[second]+i)%length];
        if( res != 0 ){
            return res;
        }
    }
    return 0;
}
This is the basic idea I came up with a few years ago, and it worked for me. Let me know if anything is unclear or if there is a mistake.
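For what it's worth, here is the same rotation-index idea as a small self-contained C++ program (C++ rather than Lua or C, and with std::sort standing in for the hand-rolled quicksort), so the pieces above can be seen working together:
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Returns the index I of the row holding the original string among the
// sorted rotations, and fills `out` with the last column (the BWT output).
static int bwt(const std::string &data, std::string &out) {
    const int n = (int)data.size();
    std::vector<int> rot(n);                 // rotation start offsets, not copies
    for (int i = 0; i < n; ++i) rot[i] = i;
    std::sort(rot.begin(), rot.end(), [&](int a, int b) {
        // lexicographic comparison of two rotations, O(n) worst case
        for (int i = 0; i < n; ++i) {
            unsigned char ca = data[(a + i) % n], cb = data[(b + i) % n];
            if (ca != cb) return ca < cb;
        }
        return false;
    });
    out.resize(n);
    int I = 0;
    for (int i = 0; i < n; ++i) {
        out[i] = data[(rot[i] + n - 1) % n]; // character preceding each rotation
        if (rot[i] == 0) I = i;              // row containing the original string
    }
    return I;
}

int main() {
    std::string out;
    int I = bwt("banana", out);
    printf("%d %s\n", I, out.c_str());       // prints: 3 nnbaaa
}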

Grouped Models Render Slowly in iOS (OpenGL ES 2.0)

First, let me clarify what I mean by "grouped models." I'm not actually sure what the standard terminology for this is. In order to reduce the number of rendering calls, I am grouping multiple models into a single model, and rendering the whole thing with a single call to glDrawElements (using VBOs). In my code, I call this a ModelGroup. I use it for various things, but especially for large groups of geometrically simple objects (like buildings in a city, or particles).
The problem that has recently surfaced is that my ModelGroups are rendering very slowly. I have isolated the slowdown to the actual call to glDrawElements by putting a timer around it. For instance, my particle field used to render ~10k particles (without instancing) in around 2 ms or so. I can't recall the exact number, but let's just say the rendering was definitely not the bottleneck that it currently is. As of now, a single call to glDrawElements with 10k particles takes right about 256 ms. This is only marginally better than rendering each object with a separate call to glDrawElements, so there is clearly a massive burden on the GPU for some reason.
What has changed in my engine:
I recently updated XCode and changed from using EAGLView to using GLKViewController. I changed nothing else in my code between these two very different states of performance. I will say that, in order to migrate over to the use of the GLKViewController, I recreated my project entirely and added all of my source files in. Then I rewrote my game loop to be updated by the GLKViewController's update function. This was a very minor change, however.
Just to be completely clear on what my ModelGroup class does, I will post the function that compiles the added models into the display model which is rendered.
-(bool) compileDisplayModelWithNormals:(bool)updateNormals withTexcoords:(bool)updateTexcoords withColor:(bool)updateColor withElements:(bool)updateElements
{
    modelCompiled = YES;
    bool initd = displayModel->positions;
    // set properties
    if( !initd )
    {
        displayModel->primType = GL_UNSIGNED_SHORT;
        displayModel->elementType = GL_TRIANGLES;
        displayModel->positionType = GL_FLOAT;
        displayModel->texcoordType = GL_FLOAT;
        displayModel->colorType = GL_FLOAT;
        displayModel->normalType = GL_FLOAT;
        displayModel->positionSize = 3;
        displayModel->normalSize = 3;
        displayModel->texcoordSize = 2;
        displayModel->colorSize = 4;
        // initialize to zero
        displayModel->numVertices = 0;
        displayModel->numElements = 0;
        displayModel->positionArraySize = 0;
        displayModel->texcoordArraySize = 0;
        displayModel->normalArraySize = 0;
        displayModel->elementArraySize = 0;
        displayModel->colorArraySize = 0;
        // sum the sizes
        for( NSObject<RenderedItem> *ri in items )
        {
            GLModel *model = ri.modelAsset.model.displayModel;
            displayModel->numVertices += model->numVertices;
            displayModel->numElements += model->numElements;
            displayModel->positionArraySize += model->positionArraySize;
            displayModel->texcoordArraySize += model->texcoordArraySize;
            displayModel->normalArraySize += model->normalArraySize;
            displayModel->elementArraySize += model->elementArraySize;
            displayModel->colorArraySize += model->colorArraySize;
        }
        displayModel->positions = (GLfloat *)malloc( displayModel->positionArraySize );
        displayModel->texcoords = (GLfloat *)malloc( displayModel->texcoordArraySize );
        displayModel->normals = (GLfloat *)malloc( displayModel->normalArraySize );
        displayModel->elements = (GLushort *)malloc( displayModel->elementArraySize );
        displayModel->colors = (GLfloat *)malloc( displayModel->colorArraySize );
    }
    // update the data
    int vertexOffset = 0;
    int elementOffset = 0;
    for( int j = 0; j < [items count]; j++ )
    {
        NSObject<RenderedItem> *ri = (GameItem *)[items objectAtIndex:j];
        GLModel *model = ri.modelAsset.model.displayModel;
        if( !ri.geometryUpdate )
        {
            vertexOffset += model->numVertices;
            continue;
        }
        // reset the update flag
        ri.geometryUpdate = NO;
        // get GameItem transform data
        rpVec3 pos = [ri getPosition];
        rpMat3 rot = [ri orientation];
        int NoV = model->numVertices;
        int NoE = model->numElements;
        for( int i = 0; i < NoV; i++ )
        {
            // positions
            rpVec3 r = rpVec3( model->positions, model->positionSize * i );
            // scale
            rpVec3 s = ri.scale;
            r.swizzleLocal( s );
            // rotate
            r = rot * r;
            // translate
            r.addLocal( pos );
            int start = model->positionSize * (vertexOffset + i);
            for( int k = 0; k < model->positionSize; k++ )
                displayModel->positions[start + k] = r[k];
            if( updateTexcoords )
            {
                // texcoords
                start = model->texcoordSize * (vertexOffset + i);
                if( model->texcoords )
                    for( int k = 0; k < model->texcoordSize; k++ )
                        displayModel->texcoords[start + k] = model->texcoords[model->texcoordSize * i + k];
            }
            if( updateNormals )
            {
                // normals (need to be rotated)
                if( model->normals )
                {
                    for( int k = 0; k < model->normalSize; k++ )
                    {
                        rpVec3 vn = rpVec3( model->normals, model->normalSize * i );
                        rpVec3 vnRot = rot * vn;
                        start = model->normalSize * (vertexOffset + i);
                        displayModel->normals[start + k] = vnRot[k];
                    }
                }
            }
            if( updateColor )
            {
                if( model->colors )
                {
                    start = model->colorSize * (vertexOffset + i);
                    displayModel->colors[start] = ri.color.r;
                    displayModel->colors[start + 1] = ri.color.g;
                    displayModel->colors[start + 2] = ri.color.b;
                    displayModel->colors[start + 3] = ri.color.a;
                }
            }
        }
        if( updateElements )
        {
            for( int i = 0; i < NoE; i++ )
            {
                // elements
                displayModel->elements[elementOffset + i] = model->elements[i] + vertexOffset;
            }
        }
        vertexOffset += NoV;
        elementOffset += NoE;
    }
    return YES;
}
Just to be complete, here is how I render the particles. Inside the particle field draw function:
glBindVertexArray( modelGroup.displayModel->modelID );
glBindTexture( GL_TEXTURE_2D, textureID );
// set shader program
if( changeShader ) glUseProgram( shader.programID );
[modelViewStack push];
mtxMultiply( modelViewProjectionMatrix.m, [projectionStack top].m, [modelViewStack top].m );
glUniformMatrix4fv( shader.modelViewProjectionMatrixID, 1, GL_FALSE, modelViewProjectionMatrix.m );
[DebugTimer check:@"particle main start"];
glDrawElements( GL_TRIANGLES, modelGroup.displayModel->numElements, GL_UNSIGNED_SHORT, 0 );
[DebugTimer check:@"particle main end"];
[modelViewStack pop];
Also, I just wanted to add that I have run it on both the device and the iPad simulator (6.1) with the same result. The simulator is slower at performing multiple draw calls, but both are equally slow at calling glDrawElements for a ModelGroup. As far as hardware acceleration is concerned, I have checked to make sure that this performance hit isn't some side effect of a lack of acceleration. I rendered a model read in from a file containing 1024 cubes (similar to a ModelGroup for a city), and it rendered with no problem (no 20 ms delay as with 1000 cubes in a ModelGroup).
I believe that I have solved the mystery, in a manner of speaking. It is, after all, still a mystery to me why this solves the problem.
I had been using my own custom enum values for these functions
glEnableVertexAttribArray
glVertexAttribPointer
instead of the Apple-specified values (new as of iOS 5.0, I think) that come with the GLKViewController class:
GLKVertexAttribPosition
GLKVertexAttribNormal
GLKVertexAttribTexCoord0
Making this change yielded the kind of performance I expected from glDrawElements: my model groups now render on the order of 0.1 ms, as they should, rather than ~20 ms. As I said, I really do not understand exactly why this fixes anything, but it is a solution all the same.
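For reference, the change amounts to an attribute setup along these lines (a sketch only; the sizes, strides, and pointer arguments here are illustrative, not taken from my engine):
glEnableVertexAttribArray( GLKVertexAttribPosition );
glVertexAttribPointer( GLKVertexAttribPosition, 3, GL_FLOAT, GL_FALSE, 0, positions );
glEnableVertexAttribArray( GLKVertexAttribNormal );
glVertexAttribPointer( GLKVertexAttribNormal, 3, GL_FLOAT, GL_FALSE, 0, normals );
glEnableVertexAttribArray( GLKVertexAttribTexCoord0 );
glVertexAttribPointer( GLKVertexAttribTexCoord0, 2, GL_FLOAT, GL_FALSE, 0, texcoords );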

Histogram Smoothing

I have a probably pretty simple question, but I am still not sure!
I only want to smooth a histogram, and I am not sure which of the following two methods is correct. Would I do it like this:
vector<double> mask(3);
mask[0] = 0.25; mask[1] = 0.5; mask[2] = 0.25;

vector<double> tmpVect(histogram->size());
for (unsigned int i = 0; i < histogram->size(); i++)
    tmpVect[i] = (*histogram)[i];

for (int bin = 1; bin < histogram->size()-1; bin++) {
    double smoothedValue = 0;
    for (int i = 0; i < mask.size(); i++) {
        smoothedValue += tmpVect[bin-1+i]*mask[i];
    }
    (*histogram)[bin] = smoothedValue;
}
Or would you usually do it like this:
vector<double> mask(3);
mask[0] = 0.25; mask[1] = 0.5; mask[2] = 0.25;

for (int bin = 1; bin < histogram->size()-1; bin++) {
    double smoothedValue = 0;
    for (int i = 0; i < mask.size(); i++) {
        smoothedValue += (*histogram)[bin-1+i]*mask[i];
    }
    (*histogram)[bin] = smoothedValue;
}
My question is: is it reasonable to copy the histogram into an extra vector first, so that when I smooth at bin i I can use the original i-1 value, or would I simply do smoothedValue += (*histogram)[bin-1+i]*mask[i];, using the already-smoothed i-1 value instead of the original one?
Regards & thanks for a reply.
Your intuition is right: you need a temporary vector. Otherwise you will end up using partly old values and partly new values, and the result will not be correct. Try it yourself on paper with a simple example: with bins {0, 4, 0, 0}, smoothing in place turns bin 1 into 2, and bin 2 is then computed from that new value 2 rather than from the original 4.
There are two ways you can write this algorithm:
1. Copy the data to a temporary vector first; then read from that one and write to histogram. This is what you did in your first code fragment.
2. Read from histogram and write to a temporary vector; then copy from the temporary vector back to histogram.
To prevent needless copying of data, you can use vector::swap. This is an extremely fast operation that swaps the contents of two vectors. Using strategy 2 above, this would result in:
vector<double> mask(3);
mask[0] = 0.25; mask[1] = 0.5; mask[2] = 0.25;

vector<double> newHistogram(histogram->size());
newHistogram[0] = (*histogram)[0];      // keep the border bins,
newHistogram.back() = histogram->back(); // which the loop below does not touch
for (int bin = 1; bin < histogram->size()-1; bin++) {
    double smoothedValue = 0;
    for (int i = 0; i < mask.size(); i++) {
        smoothedValue += (*histogram)[bin-1+i]*mask[i];
    }
    newHistogram[bin] = smoothedValue;
}
histogram->swap(newHistogram);
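As a quick sanity check, here is a minimal self-contained program applying the fragment above to illustrative values; the spike at bin 2 spreads into its neighbours:
#include <cstdio>
#include <vector>
using std::vector;

int main() {
    vector<double> data = {0, 0, 4, 0, 0};
    vector<double> *histogram = &data;

    vector<double> mask = {0.25, 0.5, 0.25};
    vector<double> newHistogram(histogram->size());
    newHistogram[0] = (*histogram)[0];
    newHistogram.back() = histogram->back();
    for (size_t bin = 1; bin < histogram->size() - 1; bin++) {
        double smoothedValue = 0;
        for (size_t i = 0; i < mask.size(); i++)
            smoothedValue += (*histogram)[bin - 1 + i] * mask[i];
        newHistogram[bin] = smoothedValue;
    }
    histogram->swap(newHistogram);

    for (double v : *histogram) printf("%g ", v);  // prints: 0 1 2 1 0
    printf("\n");
}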

Checking if removing an edge in a graph will result in the graph splitting

I have a graph structure where I am removing edges one by one until some conditions are met. My brain has totally stopped, and I can't find an efficient way to detect whether removing an edge will result in my graph splitting into two or more graphs.
The brute-force solution would be to do a BFS until one can reach all the nodes from a random node, but that will take too much time with large graphs...
Any ideas?
Edit: After a bit of searching, it seems what I am trying to do is very similar to Fleury's algorithm, where I need to find out whether an edge is a "bridge" or not.
Edges that make a graph disconnected when removed are called 'bridges'. You can find them in O(|V|+|E|) with a single depth-first search over the whole graph. The related algorithm below finds all 'articulation points' (nodes that, if removed, make the graph disconnected). The same DFS numbers also identify bridges: a tree edge (v, w) is a bridge exactly when no back edge from w's subtree reaches v or above, i.e. r_a[w] > r_v[v] (you can test that in a second pass over all edges).
//
// g: graph; v: current vertex id;
// r_p: parents (r/w); r_v: visit order (r/w); r_a: ascents (r/w);
// r_ap: art. points, bool array (r/w); n_v: DFS order-of-visit
//
void dfs_art_i(graph *g, int v, int *r_p, int *r_v, int *r_a, int *r_ap, int *n_v) {
    int i;
    r_v[v] = *n_v;
    r_a[v] = *n_v;
    (*n_v)++;
    for (i = 0; i < g->vertices[v].n_edges; i++) {
        int w = g->vertices[v].edges[i].target;
        if (r_v[w] == -1) {
            // This is the first time we find this vertex
            r_p[w] = v;
            dfs_art_i(g, w, r_p, r_v, r_a, r_ap, n_v);
            if (r_a[w] >= r_v[v]) {
                // Articulation point found
                r_ap[v] = 1;
            }
            if (r_a[w] < r_a[v]) {
                r_a[v] = r_a[w];
            }
        }
        else {
            // We have already found this vertex before: a back edge
            if (r_v[w] < r_a[v]) {
                r_a[v] = r_v[w];
            }
        }
    }
}

int dfs_art(graph *g, int root, int *r_p, int *r_v, int *r_a, int *r_ap) {
    int i, n_visited = 0, n_root_children = 0;
    for (i = 0; i < g->n_vertices; i++) {
        r_p[i] = r_v[i] = r_a[i] = -1;
        r_ap[i] = 0;
    }
    dfs_art_i(g, root, r_p, r_v, r_a, r_ap, &n_visited);
    // the root can only be an AP if it has more than 1 child
    for (i = 0; i < g->n_vertices; i++) {
        if (r_p[i] == root) {
            n_root_children++;
        }
    }
    r_ap[root] = n_root_children > 1 ? 1 : 0;
    return 1;
}
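Given the arrays filled in by dfs_art, the per-edge bridge test is then just a couple of lookups (a sketch assuming the conventions above; in an undirected DFS every non-tree edge is a back edge, and back edges are never bridges):
int is_bridge(int v, int w, int *r_p, int *r_v, int *r_a) {
    if (r_p[w] != v) {             // orient the edge as parent -> child
        if (r_p[v] != w) return 0; // not a tree edge: never a bridge
        int t = v; v = w; w = t;
    }
    return r_a[w] > r_v[v];        // no back edge from w's subtree reaches v or above
}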
If you remove the link between vertices A and B, can't you just check that you can still reach A from B after the edge removal? That's a little better than checking that you can reach all nodes from a random node.
How do you choose the edges to be removed?
Can you tell us more about your problem domain?
Just how large is your graph? Maybe BFS is just fine!
Since you wrote that you are trying to find out whether an edge is a bridge or not, I suggest you remove edges in decreasing order of their betweenness measure.
Essentially, betweenness is a measure of an edge's (or vertex's) centrality in a graph.
Edges with a higher betweenness value have a greater potential of being a bridge in the graph.
Look it up on the web; the algorithm is called the 'Girvan-Newman algorithm'.
