Finding the total weight of the MST in Prim's Algorithm - greedy

Currently I'm making a comparison between Prim's algorithm and Kruskal's algorithm. Both codes are from GeeksforGeeks; however, only the Kruskal's algorithm calculates the total weight of the MST it finds. The Prim's algorithm doesn't, and I don't have any idea how to output the total weight. I hope you can help me.
Here's the code for the Kruskal's Algorithm (from GeeksforGeeks):
class Graph:
    def __init__(self, vertices):
        self.V = vertices
        self.graph = []

    def addEdge(self, u, v, w):
        self.graph.append([u, v, w])

    def find(self, parent, i):
        if parent[i] == i:
            return i
        return self.find(parent, parent[i])

    def union(self, parent, rank, x, y):
        xroot = self.find(parent, x)
        yroot = self.find(parent, y)
        if rank[xroot] < rank[yroot]:
            parent[xroot] = yroot
        elif rank[xroot] > rank[yroot]:
            parent[yroot] = xroot
        else:
            parent[yroot] = xroot
            rank[xroot] += 1

    def KruskalMST(self):
        result = []
        i = 0
        e = 0
        self.graph = sorted(self.graph, key=lambda item: item[2])
        parent = []
        rank = []
        for node in range(self.V):
            parent.append(node)
            rank.append(0)
        while e < self.V - 1:
            u, v, w = self.graph[i]
            i = i + 1
            x = self.find(parent, u)
            y = self.find(parent, v)
            if x != y:
                e = e + 1
                result.append([u, v, w])
                self.union(parent, rank, x, y)
        minimumCost = 0
        print("Edges in the constructed MST")
        for u, v, weight in result:
            minimumCost += weight
            print("%d -- %d == %d" % (u, v, weight))
        print("Minimum Spanning Tree", minimumCost)

g = Graph(4)
g.addEdge(0, 1, 10)
g.addEdge(0, 2, 6)
g.addEdge(0, 3, 5)
g.addEdge(1, 3, 15)
g.addEdge(2, 3, 4)
g.KruskalMST()
The code for Prim's Algorithm (also from GeeksforGeeks):
import sys

class Graph():
    def __init__(self, vertices):
        self.V = vertices
        self.graph = [[0 for column in range(vertices)]
                      for row in range(vertices)]
        minimumcost = 0

    def printMST(self, parent):
        print("Edge \tWeight")
        for i in range(1, self.V):
            print(parent[i], "-", i, "\t", self.graph[i][parent[i]])

    def minKey(self, key, mstSet):
        min = sys.maxsize
        for v in range(self.V):
            if key[v] < min and mstSet[v] == False:
                min = key[v]
                min_index = v
        return min_index

    def primMST(self):
        key = [sys.maxsize] * self.V
        parent = [None] * self.V
        key[0] = 0
        mstSet = [False] * self.V
        parent[0] = -1
        for cout in range(self.V):
            u = self.minKey(key, mstSet)
            mstSet[u] = True
            for v in range(self.V):
                if self.graph[u][v] > 0 and mstSet[v] == False and key[v] > self.graph[u][v]:
                    key[v] = self.graph[u][v]
                    parent[v] = u
        self.printMST(parent)

g = Graph(5)
g.graph = [[0, 2, 0, 6, 0],
           [2, 0, 3, 8, 5],
           [0, 3, 0, 0, 7],
           [6, 8, 0, 0, 9],
           [0, 5, 7, 9, 0]]
g.primMST()
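
One possible way to surface the total (a minimal, untested sketch that mirrors how the Kruskal version above accumulates minimumCost): let printMST sum the same matrix entries it already prints. This would replace the printMST method in the class above.

    def printMST(self, parent):
        minimumCost = 0
        print("Edge \tWeight")
        for i in range(1, self.V):
            # each MST edge is (parent[i], i) with weight graph[i][parent[i]]
            minimumCost += self.graph[i][parent[i]]
            print(parent[i], "-", i, "\t", self.graph[i][parent[i]])
        print("Minimum Spanning Tree", minimumCost)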

Related

Keras Error TypeError: ('Keyword argument not understood:', 'mode')

I am using the 100 tiramisu code and I am getting this error. I know it is probably because of version changes in Keras, but I am not sure how to fix it.
I have changed the old merge method to keras.layers.concatenate, but it still gives the same error.
def relu(x): return Activation('relu')(x)
def dropout(x, p): return Dropout(p)(x) if p else x
def bn(x): return BatchNormalization(mode=2, axis=-1)(x)
def relu_bn(x): return relu(bn(x))
def concat(xs): return keras.layers.Concatenate(xs, mode='concat', concat_axis=-1)

def conv(x, nf, sz, wd, p, stride=1):
    # x = Convolution2D(nf, sz, sz, init='he_uniform', border_mode='same',
    #                   subsample=(stride,stride), W_regularizer=regularizers.l1_l2(wd))(x)
    x = Convolution2D(nf, (sz, sz), padding='same',
                      strides=(stride,stride), kernel_regularizer=regularizers.l1_l2(wd))(x)
    return dropout(x, p)

def down_path(x, nb_layers, growth_rate, p, wd):
    skips = []
    for i,n in enumerate(nb_layers):
        x,added = dense_block(n,x,growth_rate,p,wd)
        skips.append(x)
        x = transition_dn(x, p=p, wd=wd)
    return skips, added

def transition_up(added, wd=0):
    x = concat(added)
    _,r,c,ch = x.get_shape().as_list()
    # W_regularizer=l2(wd))(x)   <- stray fragment left over from the old Keras 1 call
    return Deconvolution2D(ch, (3, 3), (None,r*2,c*2,ch),
                           padding='same', stride=(2,2), kernel_regularizer=l2(wd))(x)

def up_path(added, skips, nb_layers, growth_rate, p, wd):
    for i,n in enumerate(nb_layers):
        x = transition_up(added, wd)
        x = concat([x,skips[i]])
        x,added = dense_block(n,x,growth_rate,p,wd)
    return x

def reverse(a): return list(reversed(a))

def create_tiramisu(nb_classes, img_input, nb_dense_block=6,
                    growth_rate=16, nb_filter=48, nb_layers_per_block=5, p=None, wd=0):
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)
    else:
        nb_layers = [nb_layers_per_block] * nb_dense_block
    x = conv(img_input, nb_filter, 3, wd, 0)
    skips,added = down_path(x, nb_layers, growth_rate, p, wd)
    x = up_path(added, reverse(skips[:-1]), reverse(nb_layers[:-1]), growth_rate, p, wd)
    x = conv(x, nb_classes, 1, wd, 0)
    _,r,c,f = x.get_shape().as_list()
    x = Reshape((-1, nb_classes))(x)
    return Activation('softmax')(x)

input_shape = (224,224,3)
img_input = Input(shape=input_shape)
x = create_tiramisu(32, img_input, nb_layers_per_block=[4,5,7,10,12,15], p=0.2, wd=1e-4)
The error I am getting is:
TypeError Traceback (most recent call last)
<ipython-input-80-acecdf7dd0b2> in <module>()
1 input_shape = (224,224,3)
2 img_input = Input(shape=input_shape)
----> 3 x = create_tiramisu(32, img_input, nb_layers_per_block=[4,5,7,10,12,15], p=0.2, wd=1e-4)
10 frames
/usr/local/lib/python3.7/dist-packages/keras/utils/generic_utils.py in validate_kwargs(kwargs, allowed_kwargs, error_message)
1172 for kwarg in kwargs:
1173 if kwarg not in allowed_kwargs:
-> 1174 raise TypeError(error_message, kwarg)
1175
1176
TypeError: ('Keyword argument not understood:', 'mode')
I have tried to change a few arguments that changed between Keras versions, but it still gives the same error.
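
The 'mode' keyword is exactly what the traceback rejects, so one plausible direction is rewriting the two helpers that still use Keras 1 keywords for current tf.keras (a sketch under that assumption, not a verified fix for the whole model):

from tensorflow.keras.layers import BatchNormalization, concatenate

def bn(x):
    # BatchNormalization in Keras 2 no longer accepts mode=2; axis=-1 is still valid
    return BatchNormalization(axis=-1)(x)

def concat(xs):
    # the functional helper takes the list of tensors plus an axis keyword
    return concatenate(xs, axis=-1)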

Why are all the outputs from my Neural Network the same when using the ReLU activation function?

When using the ReLU activation function in my Neural Network I noticed all the outputs were the same. The Sigmoid activation function works just fine. Is there something wrong with the ReLU function? Why is this happening? What can I do to fix this?
The example is a simple XOR Neural Network written in Lua.
math.randomseed(os.time())
local nn = require("NeuralNetwork")
network = nn.new(2,1,1,4,0.2, "ReLU")
local x = 0
local attempts = 100000
for i = 1, attempts do
    x += 1
    if x > 10000 then
        wait()
        x = 0
    end
    network:backPropagate({0, 0}, {0})
    network:backPropagate({1, 0}, {1})
    network:backPropagate({0, 1}, {1})
    network:backPropagate({1, 1}, {0})
end
print("0 0 | "..network:feedForward({0,0})[1])
print("1 0 | "..network:feedForward({1,0})[1])
print("0 1 | "..network:feedForward({0,1})[1])
print("1 1 | "..network:feedForward({1,1})[1])
Output:
0 0 | 0.48780487804878037
1 0 | 0.48780487804878037
0 1 | 0.48780487804878037
1 1 | 0.48780487804878037
- Library -
local nn = {}
nn.__index = nn

nn.ActivationFunctions = {
    sigmoid = function(x) return 1/(1+math.exp(-x)) end,
    ReLU = function(x) return math.max(0, x) end,
}

nn.Derivatives = {
    sigmoid = function(x) return x * (1 - x) end,
    ReLU = function(x) return x >= 0 and 1 or 0 end,
}

nn.CostFunctions = {
    MSE = function(outputs, expected)
        local sum = 0
        for i = 1, #outputs do
            sum += (expected[i] - outputs[i])^2
        end
        return sum/#outputs
    end,
}

function nn.new(inputs, outputs, hiddenLayers, neurons, learningRate, activationFunction)
    local self = setmetatable({}, nn)
    self.learningRate = learningRate or .3
    self.activationFunction = activationFunction or "ReLu"
    self.net = {}
    local net = self.net
    local layers = hiddenLayers+2
    for i = 1, layers do
        net[i] = {}
    end
    for i = 1, inputs do
        net[1][i] = {value = 0}
    end
    for i = 2, layers-1 do
        for x = 1, neurons do
            net[i][x] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
            for z = 1, #net[i-1] do
                net[i][x].weights[z] = math.random()*2-1
            end
        end
    end
    for i = 1, outputs do
        net[layers][i] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
        for z = 1, #net[layers-1] do
            net[layers][i].weights[z] = math.random()*2-1
        end
    end
    return self
end

function nn.newFromRawData(data)
    return setmetatable(data, nn)
end

function nn:feedForward(inputs)
    local net = self.net
    local activation = self.activationFunction
    local layers = #net
    local inputLayer = net[1]
    local outputLayer = net[layers]
    for i = 1, #inputLayer do
        inputLayer[i].value = inputs[i]
    end
    for i = 2, layers do
        local layer = net[i]
        for x = 1, #layer do
            local sum = layer[x].bias
            for z = 1, #net[i-1] do
                sum += net[i-1][z].value * layer[x].weights[z]
            end
            layer[x].netInput = sum
            layer[x].value = nn.ActivationFunctions[activation](sum)
        end
    end
    local outputs = {}
    for i = 1, #outputLayer do
        table.insert(outputs, outputLayer[i].value)
    end
    return outputs
end

function nn:backPropagate(inputs, expected)
    local outputs = self:feedForward(inputs)
    --print(outputs)
    local net = self.net
    local activation = self.activationFunction
    local layers = #net
    local lr = self.learningRate
    local inputLayer = net[1]
    local outputLayer = net[layers]
    for i = 1, #outputLayer do
        local delta = -(expected[i] - outputs[i]) * nn.Derivatives[activation](outputs[i])
        outputLayer[i].delta = delta
    end
    for i = layers-1, 2, -1 do
        local layer = net[i]
        local nextLayer = net[i+1]
        for x = 1, #layer do
            local delta = 0
            for z = 1, #nextLayer do
                delta += nextLayer[z].delta * nextLayer[z].weights[x]
            end
            layer[x].delta = delta * nn.Derivatives[activation](layer[x].value)
        end
    end
    for i = 2, layers do
        local lastLayer = net[i-1]
        for x = 1, #net[i] do
            net[i][x].bias -= lr * net[i][x].delta
            for z = 1, #lastLayer do
                net[i][x].weights[z] -= lr * net[i][x].delta * lastLayer[z].value
            end
        end
    end
end

return nn

I am trying to give the heuristics using a random function. Can anyone please correct me?

def getNeighbors(v, adjacency_list):
    return adjacency_list[v]

import random

def h(n):
    H = {
        'A': 14,
        'B': 12,
        'C': 11,
        'D': 6,
        'E': 4,
        'F': 11,
        'Z': random.randint(0, 22),
The error message points at the next line, saying invalid syntax.
        h('Z')=='Z',
    }
    return H[n]
def A_star(start_node, stop_node, adjacency_list):
    open_list = set([start_node])
    closed_list = set([])
    g = {}
    g[start_node] = 0
    parents = {}
    parents[start_node] = start_node
    while len(open_list) > 0:
        n = None
        for v in open_list:
            if n == None or g[v] + h(v) < g[n] + h(n):
                n = v
        if n == None:
            print('Path does not exist!')
            return None
        if n == stop_node:
            reconst_path = []
            while parents[n] != n:
                reconst_path.append(n)
                n = parents[n]
            reconst_path.append(start_node)
            reconst_path.reverse()
            print('Path found: {}'.format(reconst_path))
            return reconst_path
        for (m, weight) in getNeighbors(n, adjacency_list):
            if m not in open_list and m not in closed_list:
                open_list.add(m)
                parents[m] = n
                g[m] = g[n] + weight
            else:
                if g[m] > g[n] + weight:
                    g[m] = g[n] + weight
                    parents[m] = n
                    if m in closed_list:
                        closed_list.remove(m)
                        open_list.add(m)
        open_list.remove(n)
        closed_list.add(n)
    print('Path does not exist!')
    return None

adjacency_list = {
    'A': [('B', 4), ('C', 3)],
    'B': [('E', 12), ('F', 5)],
    'C': [('D', 7), ('E', 10)],
    'D': [('E', 2)],
    'E': [('Z', 5)],
    'F': [('Z', 16)]
}
A_star('A', 'Z', adjacency_list)
I would like to know if there is anything wrong with my code. Here I am trying to implement the A* algorithm while assigning the heuristics randomly. Are there any mistakes I have made? I know it might be a very silly one. If you find anything, please correct me.
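
One possible repair for the part flagged as invalid syntax (a minimal sketch, assuming the rest of the A* code stays as posted): drop the stray h('Z')=='Z', entry, since random.randint already returns a number, and build the table once at module level so every call to h sees the same random value for 'Z':

import random

H = {
    'A': 14,
    'B': 12,
    'C': 11,
    'D': 6,
    'E': 4,
    'F': 11,
    'Z': random.randint(0, 22),  # drawn once, so the heuristic stays consistent between calls
}

def h(n):
    return H[n]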

math library is missing in the latest update of Logitech G-Hub

local delay = math.random(25, 50)
[string "LuaVM"]:5: attempt to index a nil value (global 'math')
I can't use math.random anymore. Is there any way to fix this?
If the math library is missing, you can insert the following code block at the beginning of your script.
It will not restore the whole math library, only some of the most frequently used functions (including math.random).
It will also fix the following errors:
bad argument #1 to 'Sleep' (number has no integer representation)
attempt to call a nil value (field 'getn')
do
    local state_8, state_45, cached_bits, cached_bits_qty = 2, 0, 0, 0
    local prev_width, prev_bits_in_factor, prev_k = 0

    for c in GetDate():gmatch"." do
        state_45 = state_45 % 65537 * 23456 + c:byte()
    end

    local function get_53_random_bits()
        local value53 = 0
        for shift = 26, 27 do
            local p = 2^shift
            state_45 = (state_45 * 233 + 7161722017421) % 35184372088832
            repeat state_8 = state_8 * 76 % 257 until state_8 ~= 1
            local r = state_8 % 32
            local n = state_45 / 2^(13 - (state_8 - r) / 32)
            n = (n - n%1) % 2^32 / 2^r
            value53 = value53 * p + ((n%1 * 2^32) + (n - n%1)) % p
        end
        return value53
    end

    for j = 1, 10 do get_53_random_bits() end

    local function get_random_bits(number_of_bits)
        local pwr_number_of_bits = 2^number_of_bits
        local result
        if number_of_bits <= cached_bits_qty then
            result = cached_bits % pwr_number_of_bits
            cached_bits = (cached_bits - result) / pwr_number_of_bits
        else
            local new_bits = get_53_random_bits()
            result = new_bits % pwr_number_of_bits
            cached_bits = (new_bits - result) / pwr_number_of_bits * 2^cached_bits_qty + cached_bits
            cached_bits_qty = 53 + cached_bits_qty
        end
        cached_bits_qty = cached_bits_qty - number_of_bits
        return result
    end

    table = table or {}
    table.getn = table.getn or function(x) return #x end

    math = math or {}
    math.huge = math.huge or 1/0
    math.abs = math.abs or function(x) return x < 0 and -x or x end
    math.floor = math.floor or function(x) return x - x%1 end
    math.ceil = math.ceil or function(x) return x + (-x)%1 end
    math.min = math.min or function(x, y) return x < y and x or y end
    math.max = math.max or function(x, y) return x > y and x or y end
    math.sqrt = math.sqrt or function(x) return x^0.5 end
    math.pow = math.pow or function(x, y) return x^y end

    math.frexp = math.frexp or
        function(x)
            local e = 0
            if x == 0 then
                return x, e
            end
            local sign = x < 0 and -1 or 1
            x = x * sign
            while x >= 1 do
                x = x / 2
                e = e + 1
            end
            while x < 0.5 do
                x = x * 2
                e = e - 1
            end
            return x * sign, e
        end

    math.exp = math.exp or
        function(x)
            local e, t, k, p = 0, 1, 1
            repeat e, t, k, p = e + t, t * x / k, k + 1, e
            until e == p
            return e
        end

    math.log = math.log or
        function(x)
            assert(x > 0)
            local a, b, c, d, e, f = x < 1 and x or 1/x, 0, 0, 1, 1
            repeat
                repeat
                    c, d, e, f = c + d, b * d / e, e + 1, c
                until c == f
                b, c, d, e, f = b + 1 - a * c, 0, 1, 1, b
            until b <= f
            return a == x and -f or f
        end

    math.log10 = math.log10 or
        function(x)
            return math.log(x) / 2.3025850929940459
        end

    math.random = math.random or
        function(m, n)
            if m then
                if not n then
                    m, n = 1, m
                end
                local k = n - m + 1
                if k < 1 or k > 2^53 then
                    error("Invalid arguments for function 'random()'", 2)
                end
                local width, bits_in_factor, modk
                if k == prev_k then
                    width, bits_in_factor = prev_width, prev_bits_in_factor
                else
                    local pwr_prev_width = 2^prev_width
                    if k > pwr_prev_width / 2 and k <= pwr_prev_width then
                        width = prev_width
                    else
                        width = 53
                        local width_low = -1
                        repeat
                            local w = (width_low + width) / 2
                            w = w - w%1
                            if k <= 2^w then
                                width = w
                            else
                                width_low = w
                            end
                        until width - width_low == 1
                        prev_width = width
                    end
                    bits_in_factor = 0
                    local bits_in_factor_high = width + 1
                    while bits_in_factor_high - bits_in_factor > 1 do
                        local bits_in_new_factor = (bits_in_factor + bits_in_factor_high) / 2
                        bits_in_new_factor = bits_in_new_factor - bits_in_new_factor%1
                        if k % 2^bits_in_new_factor == 0 then
                            bits_in_factor = bits_in_new_factor
                        else
                            bits_in_factor_high = bits_in_new_factor
                        end
                    end
                    prev_k, prev_bits_in_factor = k, bits_in_factor
                end
                local factor, saved_bits, saved_bits_qty, pwr_saved_bits_qty = 2^bits_in_factor, 0, 0, 2^0
                k = k / factor
                width = width - bits_in_factor
                local pwr_width = 2^width
                local gap = pwr_width - k
                repeat
                    modk = get_random_bits(width - saved_bits_qty) * pwr_saved_bits_qty + saved_bits
                    local modk_in_range = modk < k
                    if not modk_in_range then
                        local interval = gap
                        saved_bits = modk - k
                        saved_bits_qty = width - 1
                        pwr_saved_bits_qty = pwr_width / 2
                        repeat
                            saved_bits_qty = saved_bits_qty - 1
                            pwr_saved_bits_qty = pwr_saved_bits_qty / 2
                            if pwr_saved_bits_qty <= interval then
                                if saved_bits < pwr_saved_bits_qty then
                                    interval = nil
                                else
                                    interval = interval - pwr_saved_bits_qty
                                    saved_bits = saved_bits - pwr_saved_bits_qty
                                end
                            end
                        until not interval
                    end
                until modk_in_range
                return m + modk * factor + get_random_bits(bits_in_factor)
            else
                return get_53_random_bits() / 2^53
            end
        end

    local orig_Sleep = Sleep

    function Sleep(x)
        return orig_Sleep(x - x%1)
    end
end

Unable to set up Pybrain LSTM module for Reber Grammar

I'm trying to use Pybrain to predict sequences of characters belonging to the Reber grammar.
Concretely, what I'm doing is generating strings using the Reber grammar graph (you can check it here: http://www.felixgers.de/papers/phd.pdf, page 22). An example of such a string could be BPVVE. I want my neural network to learn the underlying rules of the grammar. For each of these strings I create a sequence that would typically look like this:
[B, T, S, X, P, V, E,] , [B, T, S, X, P, V, E,]
B -> value = [1, 0, 0, 0, 0, 0, 0,] , target = [0, 0, 0, 0, 1, 0, 0,]
P -> value = [0, 0, 0, 0, 1, 0, 0,] , target = [0, 0, 0, 0, 0, 1, 0,]
V -> value = [0, 0, 0, 0, 0, 1, 0,] , target = [0, 0, 0, 0, 0, 1, 0,]
V -> value = [0, 0, 0, 0, 0, 1, 0,] , target = [0, 0, 0, 0, 0, 0, 1,]
E -> E is ignored for now because it marks the end
As you can see, the value is just a 7-d vector representing the current letter, and the target is the next letter in the Reber word.
Here is the code I'm trying to run :
#!/usr/bin/python
import reberGrammar as reber
import random as rnd
from pylab import *
from pybrain.supervised import RPropMinusTrainer
from pybrain.supervised import BackpropTrainer
from pybrain.datasets import SequenceClassificationDataSet
from pybrain.structure.modules import LSTMLayer, SoftmaxLayer
from pybrain.tools.validation import testOnSequenceData
from pybrain.tools.shortcuts import buildNetwork

def reberToListInt(word):  # e.g. "BPVVE" -> [0,4,3,3,5]
    out = [None]*len(word)
    for i, l in enumerate(word):
        if l == 'B':
            out[i] = 0
        elif l == 'T':
            out[i] = 1
        elif l == 'S':
            out[i] = 2
        elif l == 'V':
            out[i] = 3
        elif l == 'P':
            out[i] = 4
        elif l == 'E':
            out[i] = 5
        else:
            out[i] = 6
    return out

def buildReberDataSet(numSample):
    """Generate a 7 class dataset"""
    reberLexicon = reber.ReberGrammarLexicon(numSample)
    DS = SequenceClassificationDataSet(7, 7, nb_classes=7)
    for rw in reberLexicon.lexicon:
        DS.newSequence()
        rw2 = reberToListInt(rw)
        for i in range(len(rw2)-1):  # inserting one letter at a time
            inpt = outpt = [0.0]*7
            inpt[rw2[i]] = 1.0
            outpt[rw2[i+1]] = 1.0
            DS.addSample(inpt, outpt)
    return DS

def printDataSet(DS, numLines):  # just to print some stats
    print "\t############"
    print "Number of sequences: ", DS.getNumSequences()
    print "Input and output dimensions: ", DS.indim, "\t", DS.outdim
    print "\n"
    for i in range(numLines):
        for inp, target in DS.getSequenceIterator(i):
            print inp,
        print "\n"
    print "\t#############"

'''Dataset creation / split into training and test sets'''
fullDS = buildReberDataSet(700)
tstdata, trndata = fullDS.splitWithProportion(0.25)
trndata._convertToOneOfMany(bounds=[0., 1.])
tstdata._convertToOneOfMany(bounds=[0., 1.])
#printDataSet(trndata, 2)

'''Network setup / training'''
rnn = buildNetwork(trndata.indim, 7, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)
trainer = RPropMinusTrainer(rnn, dataset=trndata, verbose=True)
#trainer = BackpropTrainer( rnn, dataset=trndata, verbose=True, momentum=0.9, learningrate=0.5 )
trainError = []
testError = []
#errors = trainer.trainUntilConvergence()
for i in range(9):
    trainer.trainEpochs(2)
    trainError.append(100. * (1.0-testOnSequenceData(rnn, trndata)))
    testError.append(100. * (1.0-testOnSequenceData(rnn, tstdata)))
    print "train error: %5.2f%%" % trainError[i], ", test error: %5.2f%%" % testError[i]

plot(trainError)
hold(True)
plot(testError)
show()
I fail to train this net. The errors fluctuate a lot and there is no real convergence. I would really appreciate some advice on this.
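
One detail in buildReberDataSet that may be worth double-checking (an observation about the snippet above, not a guaranteed cause of the fluctuating error): inpt = outpt = [0.0]*7 binds both names to the same list, so the target's 1.0 also lands in the input vector. A tiny standalone illustration of that aliasing, using nothing beyond plain Python:

row = [0.0] * 7
inpt = outpt = row        # both names refer to the same list object
inpt[0] = 1.0
outpt[4] = 1.0
print(inpt)               # [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0] -- the input is polluted

inpt = [0.0] * 7          # two separate lists keep value and target independent
outpt = [0.0] * 7
inpt[0] = 1.0
outpt[4] = 1.0
print(inpt)               # [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]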
Here is the code I'm using to generate Reber strings :
#!/usr/bin/python
import random as rnd

class ReberGrammarLexicon(object):

    lexicon = set()  # contains Reber words
    graph = [[(1, 'T'), (5, 'P')],
             [(1, 'S'), (2, 'X')],
             [(3, 'S'), (5, 'X')],
             [(6, 'E')],
             [(3, 'V'), (2, 'P')],
             [(4, 'V'), (5, 'T')]]  # stores the graph

    def __init__(self, num, maxSize=1000):  # fill lexicon with num words
        self.maxSize = maxSize
        if maxSize < 5:
            raise NameError('maxSize too small, require maxSize > 4')
        while len(self.lexicon) < num:
            word = self.generateWord()
            if word != None:
                self.lexicon.add(word)

    def generateWord(self):  # generate one word
        c = 2
        currentEdge = 0
        word = 'B'
        while c <= self.maxSize:
            inc = rnd.randint(0, len(self.graph[currentEdge])-1)
            nextEdge = self.graph[currentEdge][inc][0]
            word += self.graph[currentEdge][inc][1]
            currentEdge = nextEdge
            if currentEdge == 6:
                break
            c += 1
        if c > self.maxSize:
            return None
        return word
Thanks,
Best
