I have created a random topology in a NED file. In that topology, every node has the same connection radius (rc). I want to create a connection between every pair of nodes that are within rc of each other.
This is as far as I have gotten:
network ranTop
{
parameters:
int n = 100;
int rc = 5;
volatile int posX = intuniform (0,100);
volatile int posY = intuniform (0,100);
submodules:
node[n] : Node {
parameters:
@display("p=$posX,$posY");
}
}
You should have (non-volatile!) posX and posY parameters for each node, and then create a loop over all possible connections, connecting two nodes only if the distance between them is less than the range. You can access the node parameters both in the loop and in the if condition:
simple MeshNode extends Node
{
parameters:
int posX = default(intuniform(0,100));
int posY = default(intuniform(0,100));
@display("i=misc/node_vs;p=$posX,$posY");
gates:
inout g[];
}
//
// A network of nodes randomly placed where nodes
// closer than 'range' are connected.
//
network MeshNetwork
{
parameters:
int num @prompt("Number of nodes") = default(100);
int range = 15; // nodes closer than this should be connected
submodules:
node[num] : MeshNode;
connections allowunconnected:
for i=0..(num-2), for j=(i+1)..(num-1) {
node[i].g++ <--> node[j].g++ if pow(node[i].posX-node[j].posX, 2)+pow(node[i].posY-node[j].posY, 2) < range*range;
}
}
I am trying to find the distance of a node from the root of a binary tree, but I only get the right answer up to 3 levels: for node(4) I get 3, but for node(9) and node(10) I also get 3.
#include<bits/stdc++.h>
using namespace std;
struct node
{
int data;
struct node *left;
struct node *right;
node(int val)
{
data = val;
left = NULL;
right = NULL;
}
};
int find_node(node* root,int n)
{
static int length=1;
if (root== NULL)
{
return 0;
}
if (root->data==n)
{
return length;
}
length=length+(find_node(root->left,n)||find_node(root->right,n));
// find_node(root->left,n);
// find_node(root->right,n);
return length;
}
int main ()
{
struct node* root = new node(1);
root->left = new node(2);
root->right = new node(3);
root->left->left = new node(4);
root->left->right = new node(5);
root->right->left = new node(6);
root->right->right = new node(7);
root->right->right->right = new node(9);
root->right->right->right->right = new node(10);
cout <<find_node(root,10);
return 0;}
When your code reaches the first leaf node (with data 4), the following assignment will assign 1:
length=length+(find_node(root->left,n)||find_node(root->right,n));
Because the expression resolves to 1+(0||0), i.e. 1. And so 1 is returned.
The caller (at the node with data 2) will thus receive this 1, and so the above statement will yield 2, since it resolves to 1+(1||......), which is 2 -- the second operand of || is not evaluated.
The parent caller (at the node with data 1) will thus receive this 2. By then the static length has itself been updated to 2, so the assignment there resolves to 2+(2||.....), which is 3 -- the value you observe. Realise that || is a logical operator, so it can only evaluate to a boolean value (i.e. 0 or 1), which is why the real depth computed by the recursion is lost.
The issues
In summary:
You should not use ||, as it can only evaluate to 0 or 1, losing the actual value from the recursion that you need.
You should not use a static variable. For one, it would not reset if you made a second call to this function from the main program code. Instead, every recursive call should just "mind its own business" and return the depth of n from the given root. The caller should add 1 to that if n was found.
Correction
int find_node(node* root, int n)
{
if (root == NULL)
{
return 0;
}
if (root->data == n)
{
return 1;
}
int length = find_node(root->left, n);
if (!length)
{
length = find_node(root->right, n);
}
if (!length)
{
return 0;
}
return 1 + length;
}
In most resources, the implementation of BFS for printing all paths from a source to a destination looks like this (this is the GeeksforGeeks implementation):
void findpaths(vector<vector<int> >& g, int src,
               int dst, int v)
{
    // create a queue which stores
    // the paths
    queue<vector<int> > q;

    // path vector to store the current path
    vector<int> path;
    path.push_back(src);
    q.push(path);

    while (!q.empty()) {
        path = q.front();
        q.pop();
        int last = path[path.size() - 1];

        // if last vertex is the desired destination
        // then print the path
        if (last == dst)
            printpath(path);

        // traverse to all the nodes connected to
        // current vertex and push new path to queue
        for (int i = 0; i < g[last].size(); i++) {
            if (isNotVisited(g[last][i], path)) {
                vector<int> newpath(path);
                newpath.push_back(g[last][i]);
                q.push(newpath);
            }
        }
    }
}
The above implementation first adds the neighbors to the queue and only checks whether a vertex is the destination when that vertex's path is dequeued.
But we could simply check whether a neighbor is the destination at the moment we add it to the queue (instead of checking it when it is that node's turn). Although it is a very minor improvement, it is still better than the previous version. So why does everyone use the previous method when implementing BFS? (A sketch of the variant I mean is shown below.)
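For concreteness, here is a minimal sketch of the "check when enqueuing" variant described above. It assumes the same g (adjacency list), printpath() and isNotVisited() helpers as the snippet quoted above; the function name is only illustrative.
void findpaths_check_on_enqueue(vector<vector<int> >& g, int src, int dst)
{
    queue<vector<int> > q;
    vector<int> path;
    path.push_back(src);

    // the start vertex is never tested on dequeue, so handle src == dst up front
    if (src == dst)
        printpath(path);

    q.push(path);
    while (!q.empty()) {
        path = q.front();
        q.pop();
        int last = path[path.size() - 1];
        for (int i = 0; i < g[last].size(); i++) {
            int next = g[last][i];
            if (!isNotVisited(next, path))
                continue;
            vector<int> newpath(path);
            newpath.push_back(next);
            if (next == dst)
                printpath(newpath);   // destination detected while enqueuing, one level earlier
            else
                q.push(newpath);      // only extend paths that have not reached dst yet
        }
    }
}
The printed paths are the same as with the original version; the saving is that paths ending in dst are never put on the queue and extended further.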
My program is a DirectX program that draws a container cube with smaller cubes inside it; the smaller cubes fall over time. I hope you understand what I mean.
The program isn't complete yet. At this stage it should only draw the container, but it draws nothing; only the background color is visible. I only included what I think is needed.
These are the routines that initialize the program:
bool Game::init(HINSTANCE hinst, HWND _hw) {
    Directx11::init(hinst, _hw);
    return LoadContent();
}
Directx11::init()
bool Directx11::init(HINSTANCE hinst,HWND hw){
_hinst=hinst;_hwnd=hw;
RECT rc;
GetClientRect(_hwnd,&rc);
height= rc.bottom - rc.top;
width = rc.right - rc.left;
UINT flags=0;
#ifdef _DEBUG
flags |=D3D11_CREATE_DEVICE_DEBUG;
#endif
HR(D3D11CreateDevice(0,_driverType,0,flags,0,0,D3D11_SDK_VERSION,&d3dDevice,&_featureLevel,&d3dDeviceContext));
if (d3dDevice == 0 || d3dDeviceContext == 0)
return 0;
DXGI_SWAP_CHAIN_DESC sdesc;
ZeroMemory(&sdesc,sizeof(DXGI_SWAP_CHAIN_DESC));
sdesc.Windowed=true;
sdesc.BufferCount=1;
sdesc.BufferDesc.Format=DXGI_FORMAT_R8G8B8A8_UNORM;
sdesc.BufferDesc.Height=height;
sdesc.BufferDesc.Width=width;
sdesc.BufferDesc.Scaling=DXGI_MODE_SCALING_UNSPECIFIED;
sdesc.BufferDesc.ScanlineOrdering=DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
sdesc.OutputWindow=_hwnd;
sdesc.BufferDesc.RefreshRate.Denominator=1;
sdesc.BufferDesc.RefreshRate.Numerator=60;
sdesc.Flags=0;
sdesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
if (m4xMsaaEnable)
{
sdesc.SampleDesc.Count=4;
sdesc.SampleDesc.Quality=m4xMsaaQuality-1;
}
else
{
sdesc.SampleDesc.Count=1;
sdesc.SampleDesc.Quality=0;
}
IDXGIDevice *Device=0;
HR(d3dDevice->QueryInterface(__uuidof(IDXGIDevice),reinterpret_cast <void**> (&Device)));
IDXGIAdapter*Ad=0;
HR(Device->GetParent(__uuidof(IDXGIAdapter),reinterpret_cast <void**> (&Ad)));
IDXGIFactory* fac=0;
HR(Ad->GetParent(__uuidof(IDXGIFactory),reinterpret_cast <void**> (&fac)));
fac->CreateSwapChain(d3dDevice,&sdesc,&swapchain);
ReleaseCOM(Device);
ReleaseCOM(Ad);
ReleaseCOM(fac);
ID3D11Texture2D *back = 0;
HR(swapchain->GetBuffer(0,__uuidof(ID3D11Texture2D),reinterpret_cast <void**> (&back)));
HR(d3dDevice->CreateRenderTargetView(back,0,&RenderTarget));
D3D11_TEXTURE2D_DESC Tdesc;
ZeroMemory(&Tdesc,sizeof(D3D11_TEXTURE2D_DESC));
Tdesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
Tdesc.ArraySize = 1;
Tdesc.Format= DXGI_FORMAT_D24_UNORM_S8_UINT;
Tdesc.Height= height;
Tdesc.Width = width;
Tdesc.Usage = D3D11_USAGE_DEFAULT;
Tdesc.MipLevels=1;
if (m4xMsaaEnable)
{
Tdesc.SampleDesc.Count=4;
Tdesc.SampleDesc.Quality=m4xMsaaQuality-1;
}
else
{
Tdesc.SampleDesc.Count=1;
Tdesc.SampleDesc.Quality=0;
}
HR(d3dDevice->CreateTexture2D(&Tdesc,0,&depthview));
HR(d3dDevice->CreateDepthStencilView(depthview,0,&depth));
d3dDeviceContext->OMSetRenderTargets(1,&RenderTarget,depth);
D3D11_VIEWPORT vp;
vp.TopLeftX=0.0f;
vp.TopLeftY=0.0f;
vp.Width = static_cast <float> (width);
vp.Height= static_cast <float> (height);
vp.MinDepth = 0.0f;
vp.MaxDepth = 1.0f;
d3dDeviceContext -> RSSetViewports(1,&vp);
return true;
}
SetBuild() prepares the matrices inside the container for the smaller cubes; I didn't program it to draw the smaller cubes yet.
And this is the function that draws the scene:
void Game::Render(){
d3dDeviceContext->ClearRenderTargetView(RenderTarget,reinterpret_cast <const float*> (&Colors::LightSteelBlue));
d3dDeviceContext->ClearDepthStencilView(depth,D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL,1.0f,0);
d3dDeviceContext-> IASetInputLayout(_layout);
d3dDeviceContext-> IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3dDeviceContext->IASetIndexBuffer(indices,DXGI_FORMAT_R32_UINT,0);
UINT strides=sizeof(Vertex),off=0;
d3dDeviceContext->IASetVertexBuffers(0,1,&vertices,&strides,&off);
D3DX11_TECHNIQUE_DESC des;
Tech->GetDesc(&des);
Floor * Lookup; /*is a variable to Lookup inside the matrices structure (Floor Contains XMMATRX Piese[9])*/
std::vector<XMFLOAT4X4> filled; // saves the matrices of the smaller cubes
XMMATRIX V=XMLoadFloat4x4(&View),P = XMLoadFloat4x4(&Proj);
XMMATRIX vp = V * P;XMMATRIX wvp;
for (UINT i = 0; i < des.Passes; i++)
{
d3dDeviceContext->RSSetState(BuildRast);
wvp = XMLoadFloat4x4(&(B.Memory[0].Pieces[0])) * vp; // Loading The Matrix at translation(0,0,0)
HR(ShadeMat->SetMatrix(reinterpret_cast<float*> ( &wvp)));
HR(Tech->GetPassByIndex(i)->Apply(0,d3dDeviceContext));
d3dDeviceContext->DrawIndexed(build_ind_count,build_ind_index,build_vers_index);
d3dDeviceContext->RSSetState(PieseRast);
UINT r1=B.GetSize(),r2=filled.size();
for (UINT j = 0; j < r1; j++)
{
Lookup = &B.Memory[j];
for (UINT r = 0; r < Lookup->filledindeces.size(); r++)
{
filled.push_back(Lookup->Pieces[Lookup->filledindeces[r]]);
}
}
for (UINT j = 0; j < r2; j++)
{
ShadeMat->SetMatrix( reinterpret_cast<const float*> (&filled[i]));
Tech->GetPassByIndex(i)->Apply(0,d3dDeviceContext);
d3dDeviceContext->DrawIndexed(piese_ind_count,piese_ind_index,piese_vers_index);
}
}
HR(swapchain->Present(0,0));}
Thanks in advance.
One bug in your program appears to be that you're using i, the index of the current pass, as an index into the filled vector, when you should apparently be using j.
Another apparent bug is that in the loop where you are supposed to be iterating over the elements of filled, you're not iterating over all of them. The value r2 is set to the size of filled before you append anything to it during that pass. During the first pass this means that nothing will be drawn by this loop. If your technique only has one pass then this means that the second DrawIndexed call in your code will never be executed.
It also appears you should only be adding matrices to filled once, regardless of the number of passes the technique has. You should consider whether your code is actually meant to work with techniques that have multiple passes. A sketch of the corrected loop structure is shown below.
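To make those fixes concrete, here is a minimal sketch of the corrected structure (not a complete Render method); the names (B, Floor, filledindeces, Pieces, ShadeMat, Tech, des, PieseRast and the piese_* arguments) are taken from the question and are assumed to exist as declared there.
// Build 'filled' once, before the pass loop, so that every pass sees all matrices:
std::vector<XMFLOAT4X4> filled;
for (UINT j = 0; j < B.GetSize(); j++)
{
    Floor* Lookup = &B.Memory[j];
    for (UINT r = 0; r < Lookup->filledindeces.size(); r++)
        filled.push_back(Lookup->Pieces[Lookup->filledindeces[r]]);
}

for (UINT i = 0; i < des.Passes; i++)
{
    // ... draw the container exactly as in the original code ...

    // Index 'filled' with the inner loop counter j, not the pass index i:
    d3dDeviceContext->RSSetState(PieseRast);
    for (UINT j = 0; j < filled.size(); j++)
    {
        // (if the effect expects a full world-view-projection matrix, multiply by 'vp' here
        //  as done for the container)
        ShadeMat->SetMatrix(reinterpret_cast<const float*>(&filled[j]));
        Tech->GetPassByIndex(i)->Apply(0, d3dDeviceContext);
        d3dDeviceContext->DrawIndexed(piese_ind_count, piese_ind_index, piese_vers_index);
    }
}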
So I have a run method which sums the weights of the edges in the artificial neural network with the threshold values of the input nodes.
My test perceptron should produce a summation of -3, but I am getting a value of 1176! What is going on here?
Here is the code that I have written for my run() method, constructor, and my main method.
Constructor:
public class Perceptron {
//We want to create a variable which will represent the number of weighted edges
//in the 2-dimensional array.
protected int num_weighted_Edges;
//Inside this class we want to create a data field which is a
//2-D array of WeightedEdges. Since the weightedEdges will be in
//double data type, we will create a double type 2-dimensional
//array.
protected WeightedEdge[][] weightedEdges;
protected int[] weights;
//We set a double field named eta equal to 0.05.
protected double eta = 0.05;
//We initialize a constructor which only takes a parameter int n.
public Perceptron(int n){
//We want to create a new graph which will have n + 1 vertices
//, where we also want vertex 0 to act like the output node
//as in a neural network.
this.num_weighted_Edges = n;
weights = new int[num_weighted_Edges];
//First we need to verify that n is a positive real number
if (num_weighted_Edges < 0){
throw new RuntimeException("You cannot have a perceptron of negative value");
}
else {
//Test code for testing if this code works.
System.out.println("A perceptron of " + num_weighted_Edges + " input nodes, and 1 output node was created");
}
//Now we create a graph object with "n" number of vertices.
weightedEdges = new WeightedEdge[num_weighted_Edges + 1][num_weighted_Edges + 1];
//Create a for loop that will iterate the weightedEdges array.
//We want to create the weighted edges from vertex 1 and not vertex 0
//since vertex 0 will be the output node, so we set i = 1.
for (int i = 1; i < weightedEdges.length; i++){
for (int j = 0; j < weightedEdges[i].length; j++){
//This will create a weighted edge in between [1][0]...[2][0]...[3][0]
//The weighted edge will have a random value between -1 and 1 assigned to it.
weightedEdges[i][0] = new WeightedEdge(i, j, 1);
}
}
}
This is my run() method:
//This method will take the input nodes, do a quick verification check on it and
//sum up the weights using the simple threshold function described in class to return
//either a 1 or -1. 1 meaning fire, and -1 not firing.
public int run(int[] weights){
//So this method will act like the summation function. It will take the int parameters
//you put into the parameter field and multiply it times the input nodes in the
//weighted edge 2 d array.
//Setup a summation counter.
int sum = 0;
if (weights.length != num_weighted_Edges){
throw new RuntimeException("Array coming in has to equal the number of input nodes");
}
else {
//We iterate the weights array and use the sum counter to sum up weights.
for (int i = 0; i < weights.length; i++){
//Create a nested for loop which will iterate over the input nodes
for ( int j = 1; j < weightedEdges.length; j++){
for (int k = 0; k < weightedEdges[j].length; k++){
//This takes the weights and multiplies it times the value in the
//input nodes. The sum should equal greater than 0 or less than 0.
sum += (int) ((weightedEdges[j][0].getWeight()) * i);
//Here the plus equals sign takes the product of (weightedEdges[j][0] * i) and
//then adds it to the previous value.
}
}
}
}
System.out.println(sum);
//If the sum is greater than 0, we fire the neuron by returning 1.
if (sum > 0){
//System.out.println(1); test code
return 1;
}
//Else we don't fire and return -1.
else {
//System.out.println(-1); test code
return -1;
}
}
This is my main method:
//Main method which will stimulate the artificial neuron (perceptron, which is the
//simplest type of neuron in an artificial network).
public static void main(String[] args){
//Create a test perceptron with a user defined set number of nodes.
Perceptron perceptron = new Perceptron(7);
//Create a weight object that creates an edge between vertices 1 and 2
//with a weight of 1.5
WeightedEdge weight = new WeightedEdge(1, 2, 1.5);
//These methods work fine.
weight.getStart();
weight.getEnd();
weight.setWeight(2.0);
//Test to see if the run class works. (Previously was giving a null pointer, but
//fixed now)
int[] test_weight_Array = {-1, -1, -1, -1, -1, 1, 1};
//Tested and works to return output of 1 or -1. Also catches exceptions.
perceptron.run(test_weight_Array);
//Testing a 2-d array to see if the train method works.
int[][] test_train_Array = {{1}, {-1}, {1}, {1}, {1}, {1}, {1}, {1}};
//Works and catches exceptions.
perceptron.train(test_train_Array);
}
}
I think you should change
sum += (int) ((weightedEdges[j][0].getWeight()) * i);
to
sum += (int) ((weightedEdges[j][k].getWeight()) * i);
I have a graph structure where I am removing edges one by one until some conditions are met. My brain has totally stopped, and I can't find an efficient way to detect whether removing an edge will split my graph into two or more components.
The brute-force solution would be to do a BFS until all the nodes can be reached from a random node, but that will take too much time with large graphs...
Any ideas?
Edit: After a bit of searching, it seems what I am trying to do is very similar to Fleury's algorithm, where I need to find out whether an edge is a "bridge" or not.
Edges that make a graph disconnected when removed are called 'bridges'. You can find them in O(|V|+|E|) with a single depth-first search over the whole graph. A related algorithm, which finds all 'articulation points' (nodes that, if removed, make the graph disconnected), follows. The same DFS data also identifies the bridges: a tree edge (v, w) is a bridge exactly when the subtree rooted at w has no back edge reaching v or above, i.e. when r_a[w] > r_v[v] (you can test that in a second pass using the parent array). A self-contained bridge-finding sketch is also given after the listing.
//
// g: graph; v: current vertex id;
// r_p: parents (r/w); r_a: ascents (r/w); r_ap: art. points, bool array (r/w)
// r_v: order of visit / discovery time (r/w); n_v: DFS visit counter
//
void dfs_art_i(graph *g, int v, int *r_p, int *r_v, int *r_a, int *r_ap, int *n_v) {
int i;
r_v[v] = *n_v;
r_a[v] = *n_v;
(*n_v) ++;
// printf("entering %d (nv = %d)\n", v, *n_v);
for (i=0; i<g->vertices[v].n_edges; i++) {
int w = g->vertices[v].edges[i].target;
// printf("\t evaluating %d->%d: ", v, w);
if (r_v[w] == -1) {
// printf("...\n");
// This is the first time we find this vertex
r_p[w] = v;
dfs_art_i(g, w, r_p, r_v, r_a, r_ap, n_v);
// printf("\n\t ... back in %d->%d", v, w);
if (r_a[w] >= r_v[v]) {
// printf(" - a[%d] %d >= v[%d] %d", w, r_a[w], v, r_v[v]);
// Articulation point found
r_ap[v] = 1;
}
if (r_a[w] < r_a[v]) {
// printf(" - a[%d] %d < a[%d] %d", w, r_a[w], v, r_a[v]);
r_a[v] = r_a[w];
}
// printf("\n");
}
else {
// printf("back");
// We have already found this vertex before
if (r_v[w] < r_a[v]) {
// printf(" - updating ascent to %d", r_v[w]);
r_a[v] = r_v[w];
}
// printf("\n");
}
}
}
int dfs_art(graph *g, int root, int *r_p, int *r_v, int *r_a, int *r_ap) {
int i, n_visited = 0, n_root_children = 0;
for (i=0; i<g->n_vertices; i++) {
r_p[i] = r_v[i] = r_a[i] = -1;
r_ap[i] = 0;
}
dfs_art_i(g, root, r_p, r_v, r_a, r_ap, &n_visited);
// the root can only be an AP if it has more than 1 child
for (i=0; i<g->n_vertices; i++) {
if (r_p[i] == root) {
n_root_children ++;
}
}
r_ap[root] = n_root_children > 1 ? 1 : 0;
return 1;
}
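If what you actually need is the bridge test itself rather than the articulation points, the same discovery-time/ascent bookkeeping gives it directly. Below is a minimal, self-contained sketch that uses a plain adjacency-list representation instead of the graph struct above, so the names are illustrative only.
#include <algorithm>
#include <utility>
#include <vector>
using namespace std;

// Tarjan-style bridge finding on an undirected simple graph given as an
// adjacency list. disc[] holds DFS discovery times; low[] holds the lowest
// discovery time reachable from a vertex's subtree using at most one back edge.
struct BridgeFinder {
    const vector<vector<int> >& adj;
    vector<int> disc, low;
    vector<pair<int, int> > bridges;
    int timer;

    explicit BridgeFinder(const vector<vector<int> >& g)
        : adj(g), disc(g.size(), -1), low(g.size(), -1), timer(0) {}

    void dfs(int v, int parent) {
        disc[v] = low[v] = timer++;
        for (size_t i = 0; i < adj[v].size(); i++) {
            int w = adj[v][i];
            if (w == parent)
                continue;                               // skip the tree edge back to the parent
            if (disc[w] == -1) {                        // tree edge
                dfs(w, v);
                low[v] = min(low[v], low[w]);
                if (low[w] > disc[v])                   // w's subtree cannot reach v or above,
                    bridges.push_back(make_pair(v, w)); // so (v, w) is a bridge
            } else {                                    // back edge
                low[v] = min(low[v], disc[w]);
            }
        }
    }

    vector<pair<int, int> > run() {
        for (int v = 0; v < (int)adj.size(); v++)
            if (disc[v] == -1)
                dfs(v, -1);
        return bridges;
    }
};
Removing any edge that is not in the returned list keeps its component connected. Note that the sketch assumes a simple graph; with parallel edges the parent check would have to track the incoming edge rather than the parent vertex.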
If you remove the link between vertices A and B, can't you just check that you can still reach A from B after the edge removal? That's a little better than getting to all nodes from a random node.
How do you choose the edges to be removed?
Can you tell more about your problem domain?
Just how large is your graph? Maybe BFS is just fine!
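A minimal sketch of the per-edge reachability check suggested a few lines above (after removing the edge between a and b, run a BFS from a and see whether b can still be reached); the adjacency-list representation is only illustrative.
#include <queue>
#include <vector>
using namespace std;

// Returns true if b is still reachable from a in 'adj', an adjacency list
// from which the edge (a, b) has already been removed.
bool still_connected(const vector<vector<int> >& adj, int a, int b) {
    vector<bool> seen(adj.size(), false);
    queue<int> q;
    seen[a] = true;
    q.push(a);
    while (!q.empty()) {
        int v = q.front();
        q.pop();
        if (v == b)
            return true;                  // reached b without the removed edge
        for (size_t i = 0; i < adj[v].size(); i++) {
            int w = adj[v][i];
            if (!seen[w]) {
                seen[w] = true;
                q.push(w);
            }
        }
    }
    return false;                         // b is unreachable: the removed edge was a bridge
}
This costs O(|V|+|E|) per removed edge, so if many edges have to be tested, the single-DFS bridge computation above is the better fit.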
Since you wrote that you are trying to find out whether an edge is a bridge or not, I suggest you remove edges in decreasing order of their betweenness measure.
Essentially, betweenness is a measure of an edge's (or vertex's) centrality in a graph.
Edges with a higher betweenness value have a greater chance of being a bridge in the graph.
Look it up on the web; the algorithm is called the 'Girvan-Newman algorithm'.