Hello, I have recently started working on a CNN for binary classification.
Here is my network:
net.meta.inputSize = [28 28 3] ;
net.meta.trainOpts.learningRate = 1e-02 ;
net.meta.trainOpts.numEpochs = 50 ;
net.meta.trainOpts.batchSize = 280 ;
opts.train.gpus = [];
f=1/100 ;
net.layers = {} ;
net.layers{end+1} = struct('type', 'conv','weights', {{f*randn(5,5,3,50, 'single'), zeros(1, 50, 'single')}},'stride', 1,'pad', 0);
net.layers{end+1} = struct('type', 'pool','method', 'max', 'pool', [2 2], 'stride', 2, 'pad', 0) ;
net.layers{end+1} = struct('type', 'conv', 'weights', {{f*randn(5,5,50,30, 'single'),zeros(1,30,'single')}}, 'stride', 1, 'pad', 0) ;
net.layers{end+1} = struct('type', 'pool', 'method', 'max', 'pool', [2 2], 'stride', 2, 'pad', 0) ;
net.layers{end+1} = struct('type', 'conv', 'weights', {{f*randn(4,4,30,50, 'single'), zeros(1,50,'single')}}, 'stride', 1, 'pad', 0) ;
net.layers{end+1} = struct('type', 'relu') ;
net.layers{end+1} = struct('type', 'conv', 'weights', {{f*randn(1,1,50,2, 'single'), zeros(1,2,'single')}}, 'stride', 1, 'pad', 0) ;
net.layers{end+1}= struct('type', 'dropout', 'rate', 0.9);
net.layers{end+1} = struct('type', 'softmaxloss') ;
net.meta.classes.name = arrayfun(@(x) sprintf('%d',x), 0:1, 'UniformOutput', false);
disp( 'Net is Ok.' );
[net, info] = cnn_train( net, imdb, @getBatch, net.meta.trainOpts, opts.train, 'val', find( imdb.images.set == 2 ) ) ;
I am getting this output at the end if the learning rate is 1e-2.
If I reduce the learning rate significantly, I get this output instead.
Could anyone please tell me which learning rate is good and what these graphs are showing?
UPDATE:
I was doing something slightly wrong. The error function in MatConvNet's cnn_train is 'multiclass' by default, so I have changed it to 'binary'. Now I am getting this result with a combination of different learning rates: [0.00005*ones(1,10) 0.000005*ones(1,20) 0.0000005*ones(1,20)].
Currently I'm making a comparison between Prim's algorithm and Kruskal's algorithm. Both implementations are from GeeksforGeeks; however, only the Kruskal's code calculates the total weight of the MST it finds. The Prim's code doesn't, and I don't have any idea how to output the total weight. I hope you can help me.
Here's the code for Kruskal's algorithm (from GeeksforGeeks):
class Graph:
    def __init__(self, vertices):
        self.V = vertices
        self.graph = []

    def addEdge(self, u, v, w):
        self.graph.append([u, v, w])

    def find(self, parent, i):
        if parent[i] == i:
            return i
        return self.find(parent, parent[i])

    def union(self, parent, rank, x, y):
        xroot = self.find(parent, x)
        yroot = self.find(parent, y)
        if rank[xroot] < rank[yroot]:
            parent[xroot] = yroot
        elif rank[xroot] > rank[yroot]:
            parent[yroot] = xroot
        else:
            parent[yroot] = xroot
            rank[xroot] += 1

    def KruskalMST(self):
        result = []
        i = 0
        e = 0
        self.graph = sorted(self.graph, key=lambda item: item[2])
        parent = []
        rank = []
        for node in range(self.V):
            parent.append(node)
            rank.append(0)
        while e < self.V - 1:
            u, v, w = self.graph[i]
            i = i + 1
            x = self.find(parent, u)
            y = self.find(parent, v)
            if x != y:
                e = e + 1
                result.append([u, v, w])
                self.union(parent, rank, x, y)
        minimumCost = 0
        print("Edges in the constructed MST")
        for u, v, weight in result:
            minimumCost += weight
            print("%d -- %d == %d" % (u, v, weight))
        print("Minimum Spanning Tree", minimumCost)

g = Graph(4)
g.addEdge(0, 1, 10)
g.addEdge(0, 2, 6)
g.addEdge(0, 3, 5)
g.addEdge(1, 3, 15)
g.addEdge(2, 3, 4)
g.KruskalMST()
The code for Prim's Algorithm (also from GeeksforGeeks):
import sys

class Graph():
    def __init__(self, vertices):
        self.V = vertices
        self.graph = [[0 for column in range(vertices)]
                      for row in range(vertices)]
        minimumcost = 0

    def printMST(self, parent):
        print("Edge \tWeight")
        for i in range(1, self.V):
            print(parent[i], "-", i, "\t", self.graph[i][parent[i]])

    def minKey(self, key, mstSet):
        min = sys.maxsize
        for v in range(self.V):
            if key[v] < min and mstSet[v] == False:
                min = key[v]
                min_index = v
        return min_index

    def primMST(self):
        key = [sys.maxsize] * self.V
        parent = [None] * self.V
        key[0] = 0
        mstSet = [False] * self.V
        parent[0] = -1
        for cout in range(self.V):
            u = self.minKey(key, mstSet)
            mstSet[u] = True
            for v in range(self.V):
                if self.graph[u][v] > 0 and mstSet[v] == False and key[v] > self.graph[u][v]:
                    key[v] = self.graph[u][v]
                    parent[v] = u
        self.printMST(parent)

g = Graph(5)
g.graph = [[0, 2, 0, 6, 0],
           [2, 0, 3, 8, 5],
           [0, 3, 0, 0, 7],
           [6, 8, 0, 0, 9],
           [0, 5, 7, 9, 0]]
g.primMST()
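For the question above about getting the total weight out of Prim's algorithm: since parent[i] holds each vertex's parent in the MST, one straightforward option is to sum self.graph[i][parent[i]] over the same edges that printMST already prints. A minimal sketch (one possible modification, not part of the GeeksforGeeks code):
# Inside class Graph, a printMST variant that also reports the total MST weight.
def printMST(self, parent):
    minimumCost = 0
    print("Edge \tWeight")
    for i in range(1, self.V):
        weight = self.graph[i][parent[i]]
        minimumCost += weight
        print(parent[i], "-", i, "\t", weight)
    print("Minimum Spanning Tree", minimumCost)
With the 5-vertex example graph above, this prints the four MST edges and then their summed weight.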
When using the ReLU activation function in my neural network, I noticed that all the outputs were the same. The sigmoid activation function works just fine. Is there something wrong with the ReLU function? Why is this happening, and what can I do to fix it?
The example is a simple XOR Neural Network written in Lua.
math.randomseed(os.time())
local nn = require("NeuralNetwork")
network = nn.new(2,1,1,4,0.2, "ReLU")
local x=0
local attempts = 100000
for i = 1,attempts do
x+=1
if x > 10000 then
wait()
x=0
end
network:backPropagate({0, 0}, {0})
network:backPropagate({1, 0}, {1})
network:backPropagate({0, 1}, {1})
network:backPropagate({1, 1}, {0})
end
print("0 0 | "..network:feedForward({0,0})[1])
print("1 0 | "..network:feedForward({1,0})[1])
print("0 1 | "..network:feedForward({0,1})[1])
print("1 1 | "..network:feedForward({1,1})[1])
Output:
0 0 | 0.48780487804878037
1 0 | 0.48780487804878037
0 1 | 0.48780487804878037
1 1 | 0.48780487804878037
- Library -
local nn = {}
nn.__index = nn
nn.ActivationFunctions = {
sigmoid = function(x) return 1/(1+math.exp(-x)) end,
ReLU = function(x) return math.max(0, x) end,
}
nn.Derivatives = {
sigmoid = function(x) return x * (1 - x) end,
ReLU = function(x) return x >= 0 and 1 or 0 end,
}
nn.CostFunctions = {
MSE = function(outputs, expected)
local sum = 0
for i = 1, #outputs do
sum += (expected[i] - outputs[i])^2
end
return sum/#outputs
end,
}
function nn.new(inputs, outputs, hiddenLayers, neurons, learningRate, activationFunction)
local self = setmetatable({}, nn)
self.learningRate = learningRate or .3
self.activationFunction = activationFunction or "ReLu"
self.net = {}
local net = self.net
local layers = hiddenLayers+2
for i = 1, layers do
net[i] = {}
end
for i = 1, inputs do
net[1][i] = {value = 0}
end
for i = 2, layers-1 do
for x = 1, neurons do
net[i][x] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
for z = 1, #net[i-1] do
net[i][x].weights[z] = math.random()*2-1
end
end
end
for i = 1, outputs do
net[layers][i] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
for z = 1, #net[layers-1] do
net[layers][i].weights[z] = math.random()*2-1
end
end
return self
end
function nn.newFromRawData(data)
return setmetatable(data, nn)
end
function nn:feedForward(inputs)
local net = self.net
local activation = self.activationFunction
local layers = #net
local inputLayer = net[1]
local outputLayer = net[layers]
for i = 1, #inputLayer do
inputLayer[i].value = inputs[i]
end
for i = 2, layers do
local layer = net[i]
for x = 1, #layer do
local sum = layer[x].bias
for z = 1, #net[i-1] do
sum += net[i-1][z].value * layer[x].weights[z]
end
layer[x].netInput = sum
layer[x].value = nn.ActivationFunctions[activation](sum)
end
end
local outputs = {}
for i = 1, #outputLayer do
table.insert(outputs, outputLayer[i].value)
end
return outputs
end
function nn:backPropagate(inputs, expected)
local outputs = self:feedForward(inputs)
--print(outputs)
local net = self.net
local activation = self.activationFunction
local layers = #net
local lr = self.learningRate
local inputLayer = net[1]
local outputLayer = net[layers]
for i = 1, #outputLayer do
local delta = -(expected[i] - outputs[i]) * nn.Derivatives[activation](outputs[i])
outputLayer[i].delta = delta
end
for i = layers-1, 2, -1 do
local layer = net[i]
local nextLayer = net[i+1]
for x = 1, #layer do
local delta = 0
for z = 1, #nextLayer do
delta += nextLayer[z].delta * nextLayer[z].weights[x]
end
layer[x].delta = delta * nn.Derivatives[activation](layer[x].value)
end
end
for i = 2, layers do
local lastLayer = net[i-1]
for x = 1, #net[i] do
net[i][x].bias -= lr * net[i][x].delta
for z = 1, #lastLayer do
net[i][x].weights[z] -= lr * net[i][x].delta * lastLayer[z].value
end
end
end
end
return nn
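One detail worth noticing in the library above: nn.ActivationFunctions.ReLU returns math.max(0, x), which is never negative, so nn.Derivatives.ReLU evaluated on that output ("x >= 0 and 1 or 0") comes out as 1 even for neurons that were clamped to zero; evaluating the derivative on the stored netInput instead distinguishes active from inactive units. A tiny Python sketch of the difference (illustration only, not the Lua library):
def relu(x):
    return max(0.0, x)

def relu_grad_from_output(y):       # what the library does: y = relu(x) is always >= 0
    return 1.0 if y >= 0 else 0.0   # -> always 1.0

def relu_grad_from_net_input(x):    # derivative taken at the pre-activation instead
    return 1.0 if x > 0 else 0.0

x = -2.0                            # a clamped ("dead") unit
y = relu(x)                         # y == 0.0
print(relu_grad_from_output(y))     # 1.0: gradient flows even though the unit is off
print(relu_grad_from_net_input(x))  # 0.0: gradient correctly blocked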
I've been trying to figure out what I have done wrong for many hours, but I just can't. I've even looked at other basic neural network libraries to make sure that my gradient descent algorithm was correct, but it still isn't working.
I'm trying to teach it XOR, but it outputs:
input (0 0) | 0.011441891321516094
input (1 0) | 0.6558508610135193
input (0 1) | 0.6558003273099053
input (1 1) | 0.6563021185296245
after 1000 training passes, so clearly there's something wrong.
The code is written in Lua, and I created the neural network from raw data so you can easily see how the data is formatted.
- Training code -
math.randomseed(os.time())
local nn = require("NeuralNetwork")
local network = nn.newFromRawData({
["activationFunction"] = "sigmoid",
["learningRate"] = 0.3,
["net"] = {
[1] = {
[1] = {
["value"] = 0
},
[2] = {
["value"] = 0
}
},
[2] = {
[1] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1
}
},
[2] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1
}
},
[3] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1
}
},
[4] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1
}
}
},
[3] = {
[1] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1,
[3] = 1,
[4] = 1
}
}
}
}
})
attempts = 1000
for i = 1,attempts do
network:backPropagate({0,0},{0})
network:backPropagate({1,0},{1})
network:backPropagate({0,1},{1})
network:backPropagate({1,1},{0})
end
print("Results:")
print("input (0 0) | "..network:feedForward({0,0})[1])
print("input (1 0) | "..network:feedForward({1,0})[1])
print("input (0 1) | "..network:feedForward({0,1})[1])
print("input (1 1) | "..network:feedForward({1,1})[1])
- Library -
local nn = {}
nn.__index = nn
nn.ActivationFunctions = {
sigmoid = function(x) return 1/(1+math.exp(-x/1)) end,
ReLu = function(x) return math.max(0, x) end,
}
nn.Derivatives = {
sigmoid = function(x) return x * (1 - x) end,
ReLu = function(x) return x > 0 and 1 or 0 end,
}
nn.CostFunctions = {
MSE = function(outputs, expected)
local sum = 0
for i = 1, #outputs do
sum += 1/2*(expected[i] - outputs[i])^2
end
return sum/#outputs
end,
}
function nn.new(inputs, outputs, hiddenLayers, neurons, learningRate, activationFunction)
local self = setmetatable({}, nn)
self.learningRate = learningRate or .3
self.activationFunction = activationFunction or "ReLu"
self.net = {}
local net = self.net
local layers = hiddenLayers+2
for i = 1, layers do
net[i] = {}
end
for i = 1, inputs do
net[1][i] = {value = 0}
end
for i = 2, layers-1 do
for x = 1, neurons do
net[i][x] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
for z = 1, #net[i-1] do
net[i][x].weights[z] = math.random()*2-1
end
end
end
for i = 1, outputs do
net[layers][i] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
for z = 1, #net[layers-1] do
net[layers][i].weights[z] = math.random()*2-1
end
end
return self
end
function nn.newFromRawData(data)
return setmetatable(data, nn)
end
function nn:feedForward(inputs)
local net = self.net
local activation = self.activationFunction
local layers = #net
local inputLayer = net[1]
local outputLayer = net[layers]
for i = 1, #inputLayer do
inputLayer[i].value = inputs[i]
end
for i = 2, layers do
local layer = net[i]
for x = 1, #layer do
local sum = layer[x].bias
for z = 1, #net[i-1] do
sum += net[i-1][z].value * layer[x].weights[z]
end
layer[x].netInput = sum
layer[x].value = nn.ActivationFunctions[activation](sum)
end
end
local outputs = {}
for i = 1, #outputLayer do
table.insert(outputs, outputLayer[i].value)
end
return outputs
end
function nn:backPropagate(inputs, expected)
local outputs = self:feedForward(inputs)
local net = self.net
local activation = self.activationFunction
local layers = #net
local lr = self.learningRate
local inputLayer = net[1]
local outputLayer = net[layers]
for i = 1, #outputLayer do
local delta = -(expected[i] - outputs[i]) * nn.Derivatives[activation](net[layers][i].value)
outputLayer[i].delta = delta
end
for i = layers-1, 2, -1 do
local layer = net[i]
local nextLayer = net[i+1]
for x = 1, #layer do
local delta = 0
for z = 1, #nextLayer do
delta += nextLayer[z].delta * nextLayer[z].weights[x]
end
layer[x].delta = delta * nn.Derivatives[activation](layer[x].value)
end
end
for i = 2, layers do
local lastLayer = net[i-1]
for x = 1, #net[i] do
net[i][x].bias -= lr * net[i][x].delta
for z = 1, #lastLayer do
net[i][x].weights[z] -= lr * net[i][x].delta * lastLayer[z].value
end
end
end
end
return nn
Any help would be highly appreciated, thanks!
All initial weights must be DIFFERENT numbers, otherwise backpropagation will not work. For example, you can replace each 1 with math.random().
Also increase the number of attempts to 10000.
With these modifications, your code works fine:
Results:
input (0 0) | 0.028138230938126
input (1 0) | 0.97809448578087
input (0 1) | 0.97785000216126
input (1 1) | 0.023128477689456
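To see why identical initial weights stall learning, here is a tiny numpy sketch (an illustration of the symmetry problem, not the Lua library above): two hidden units that start with identical weights receive identical gradients, so they stay exact copies of each other after every update and the layer effectively has only one unit.
import numpy as np

# One gradient step on a 2-input, 2-hidden-unit sigmoid layer (biases omitted).
def step(W, x, upstream_grad, lr=0.3):
    h = 1.0 / (1.0 + np.exp(-W @ x))                        # hidden activations
    grad = (upstream_grad * h * (1 - h))[:, None] * x[None, :]
    return W - lr * grad

x = np.array([1.0, 0.0])
upstream = np.array([0.5, 0.5])     # with equal output weights, both units get the same error signal

W_same = np.ones((2, 2))            # both rows identical, like the all-1 initial weights
W_rand = np.random.randn(2, 2)      # symmetry broken

print(step(W_same, x, upstream))    # rows are still identical after the update
print(step(W_rand, x, upstream))    # rows differ, so the units can specialize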
My code is written in Dart, but the question is more generally about the binary tree data structure and a register-based VM implementation. I have commented the code so you can follow it even if you do not know Dart.
So, here are my nodes:
enum NodeType {
numberNode,
addNode,
subtractNode,
multiplyNode,
divideNode,
plusNode,
minusNode,
}
NumberNode has a number value in it.
AddNode, SubtractNode, MultiplyNode, and DivideNode are really just binary operator nodes.
PlusNode and MinusNode are unary operator nodes.
The tree is generated based on order of operations: unary operations first, then multiplication and division, and then addition and subtraction. E.g. "1 + 2 * -3" becomes "(1 + (2 * (-3)))".
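Just to make the shape concrete, here is that example tree written out as nested Python tuples (purely an illustration of the nesting, not the Dart node classes):
# "1 + 2 * -3"  ->  (1 + (2 * (-3)))
tree = ("add",
        ("number", 1),
        ("multiply",
         ("number", 2),
         ("minus", ("number", 3))))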
Here is my code trying to walk over the AST:
/// Converts tree to Register-based VM code
List<Opcode> convertNodeToCode(Node node) {
List<Opcode> result = [const Opcode(OpcodeKind.loadn, 2, -1)];
bool counterHasBeenZero = false;
bool binOpDebounce = false;
int counter = 0;
List<Opcode> convert(Node node) {
switch (node.nodeType) {
case NodeType.numberNode:
counter = counter == 0 ? 1 : 0;
if (counter == 0 && !counterHasBeenZero) {
counterHasBeenZero = true;
} else {
counter = 1;
}
return [Opcode(OpcodeKind.loadn, counter, (node as NumberNode).value)];
case NodeType.addNode:
var aNode = node as AddNode;
return convert(aNode.nodeA) +
convert(aNode.nodeB) +
[
const Opcode(
OpcodeKind.addn,
0,
1,
)
];
case NodeType.subtractNode:
var sNode = node as SubtractNode;
var result = convert(sNode.nodeA) +
convert(sNode.nodeB) +
(binOpDebounce
? [
const Opcode(
OpcodeKind.subn,
0,
0,
1,
)
]
: [
const Opcode(
OpcodeKind.subn,
0,
1,
)
]);
if (!binOpDebounce) binOpDebounce = true;
return result;
case NodeType.multiplyNode:
var mNode = node as MultiplyNode;
var result = convert(mNode.nodeA) +
convert(mNode.nodeB) +
(binOpDebounce
? [
const Opcode(
OpcodeKind.muln,
0,
0,
1,
)
]
: [
const Opcode(
OpcodeKind.muln,
0,
1,
)
]);
if (!binOpDebounce) binOpDebounce = true;
return result;
case NodeType.divideNode:
var dNode = node as DivideNode;
var result = convert(dNode.nodeA) +
convert(dNode.nodeB) +
(binOpDebounce
? [
const Opcode(
OpcodeKind.divn,
0,
0,
1,
)
]
: [
const Opcode(
OpcodeKind.divn,
0,
1,
)
]);
if (!binOpDebounce) binOpDebounce = true;
return result;
case NodeType.plusNode:
return convert((node as PlusNode).node);
case NodeType.minusNode:
return convert((node as MinusNode).node) +
[Opcode(OpcodeKind.muln, 1, 2)];
default:
throw Exception('Non-existent node type');
}
}
return result + convert(node) + [const Opcode(OpcodeKind.exit)];
}
I tried a method using just 2-3 registers and a counter to track where I loaded each number, but the code gets ugly very quickly, and once order of operations is involved it becomes really hard to keep track of where the numbers are with the counter. Basically, the way I tried to make this work is to store each number in register 1 or 0, loading it when needed, and add the registers together into register 0. For example, 1 + 2 + 3 + 4 becomes [r2 = -1.0, r1 = 1.0, r0 = 2.0, r0 = r1 + r0, r1 = 3.0, r0 = r1 + r0, r1 = 4.0, r0 = r1 + r0, exit]. When I tried this with multiplication, though, it became very hard, because it multiplied the wrong numbers, probably due to the order of operations.
I also tried to see if it could be done this way:
// (1 + (2 * ((-2) + 3) * 5))
const code = [
// (-2)
Opcode(OpcodeKind.loadn, 1, -2), // r1 = -2;
// (2 + 3)
Opcode(OpcodeKind.loadn, 1, 2), // r1 = 2;
Opcode(OpcodeKind.loadn, 2, 3), // r2 = 3;
Opcode(OpcodeKind.addn, 2, 1, 2), // r2 = r1 + r2;
// (2 * (result) * 5)
Opcode(OpcodeKind.loadn, 1, 2), // r1 = 2;
Opcode(OpcodeKind.loadn, 3, 5), // r3 = 5;
Opcode(OpcodeKind.muln, 2, 1, 2), // r2 = r1 * r2;
Opcode(OpcodeKind.muln, 2, 2, 3), // r2 = r2 * r3;
// (1 + (result))
Opcode(OpcodeKind.loadn, 1, 1), // r1 = 1;
Opcode(OpcodeKind.addn, 1, 1, 2), // r1 = r1 + r2;
Opcode(OpcodeKind.exit), // exit halt
];
I knew this method would not work, because if I'm going to iterate through the nodes I need to know the positions of the numbers and registers beforehand, so I'd have to find another way to locate the number/register.
You don't need to read all of the above; those were just my attempts at producing register-based virtual machine code.
I want to see how you would approach it.
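For comparison, one common way to sidestep the counter bookkeeping is to have the tree walk return the register that holds each subexpression's result, handing out a fresh register per node and letting a binary operation write its result back into its left operand's register. A rough Python sketch of that idea (the tuple-based instructions and node names are made up for illustration, not the Dart Opcode API):
# Each node is a tuple: ("number", v), ("plus"/"minus", n), or (op, left, right).
# compile_node returns (code, register_holding_the_result).
def compile_node(node, next_reg=0):
    kind = node[0]
    if kind == "number":
        return [("loadn", next_reg, node[1])], next_reg
    if kind == "plus":                       # unary plus: no-op
        return compile_node(node[1], next_reg)
    if kind == "minus":                      # unary minus: negate the operand
        code, r = compile_node(node[1], next_reg)
        return code + [("muln", r, r, -1)], r    # -1 treated as an immediate for brevity
    op = {"add": "addn", "subtract": "subn",
          "multiply": "muln", "divide": "divn"}[kind]
    left_code, left_reg = compile_node(node[1], next_reg)
    right_code, right_reg = compile_node(node[2], left_reg + 1)
    # The result overwrites the left operand's register, freeing the right one.
    return left_code + right_code + [(op, left_reg, left_reg, right_reg)], left_reg

tree = ("add", ("number", 1),
               ("multiply", ("number", 2), ("minus", ("number", 3))))
code, result_reg = compile_node(tree)
print(code)        # [('loadn', 0, 1), ('loadn', 1, 2), ('loadn', 2, 3),
                   #  ('muln', 2, 2, -1), ('muln', 1, 1, 2), ('addn', 0, 0, 1)]
print(result_reg)  # 0
Because every subexpression reports which register it ended up in, order of operations falls out of the recursion: the operands are compiled first, and the parent only combines the two returned registers.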
I'm trying to use Pybrain to predict sequences of characters belonging to the Reber grammar.
Concretely, what I'm doing is generating strings using the Reber grammar graph (you can check it here: http://www.felixgers.de/papers/phd.pdf, page 22). An example of such a string is BPVVE. I want my neural network to learn the underlying rules of the grammar. For each of these strings I create a sequence that typically looks like this:
[B, T, S, X, P, V, E,] , [B, T, S, X, P, V, E,]
B -> value = [1, 0, 0, 0, 0, 0, 0,] , target = [0, 0, 0, 0, 1, 0, 0,]
P -> value = [0, 0, 0, 0, 1, 0, 0,] , target = [0, 0, 0, 0, 0, 1, 0,]
V -> value = [0, 0, 0, 0, 0, 1, 0,] , target = [0, 0, 0, 0, 0, 1, 0,]
V -> value = [0, 0, 0, 0, 0, 1, 0,] , target = [0, 0, 0, 0, 0, 0, 1,]
E -> E is ignored for now because it marks the end
As you can see, the value is just a 7-d one-hot vector representing the current letter, and the target is the next letter in the Reber word.
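For reference, that encoding can be written compactly; a small sketch using the same letter indices as reberToListInt below (index 6 is the spare slot):
LETTER_INDEX = {'B': 0, 'T': 1, 'S': 2, 'V': 3, 'P': 4, 'E': 5}

def one_hot(letter, size=7):
    vec = [0.0] * size
    vec[LETTER_INDEX.get(letter, 6)] = 1.0
    return vec

# For "BPVVE": each sample pairs the current letter with the next one,
# and the final E has no target, matching the example above.
word = "BPVVE"
samples = [(one_hot(a), one_hot(b)) for a, b in zip(word, word[1:])]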
Here is the code I'm trying to run:
#!/usr/bin/python
import reberGrammar as reber
import random as rnd
from pylab import *
from pybrain.supervised import RPropMinusTrainer
from pybrain.supervised import BackpropTrainer
from pybrain.datasets import SequenceClassificationDataSet
from pybrain.structure.modules import LSTMLayer, SoftmaxLayer
from pybrain.tools.validation import testOnSequenceData
from pybrain.tools.shortcuts import buildNetwork

def reberToListInt(word):  # e.g. "BPVVE" -> [0,4,3,3,5]
    out = [None]*len(word)
    for i, l in enumerate(word):
        if l == 'B':
            out[i] = 0
        elif l == 'T':
            out[i] = 1
        elif l == 'S':
            out[i] = 2
        elif l == 'V':
            out[i] = 3
        elif l == 'P':
            out[i] = 4
        elif l == 'E':
            out[i] = 5
        else:
            out[i] = 6
    return out

def buildReberDataSet(numSample):
    """Generate a 7 class dataset"""
    reberLexicon = reber.ReberGrammarLexicon(numSample)
    DS = SequenceClassificationDataSet(7, 7, nb_classes=7)
    for rw in reberLexicon.lexicon:
        DS.newSequence()
        rw2 = reberToListInt(rw)
        for i in range(len(rw2)-1):  # inserting one letter at a time
            inpt = outpt = [0.0]*7
            inpt[rw2[i]] = 1.0
            outpt[rw2[i+1]] = 1.0
            DS.addSample(inpt, outpt)
    return DS

def printDataSet(DS, numLines):  # just to print some stat
    print "\t############"
    print "Number of sequences: ", DS.getNumSequences()
    print "Input and output dimensions: ", DS.indim, "\t", DS.outdim
    print "\n"
    for i in range(numLines):
        for inp, target in DS.getSequenceIterator(i):
            print inp,
        print "\n"
    print "\t#############"

'''Dataset creation / split into training and test sets'''
fullDS = buildReberDataSet(700)
tstdata, trndata = fullDS.splitWithProportion(0.25)
trndata._convertToOneOfMany(bounds=[0., 1.])
tstdata._convertToOneOfMany(bounds=[0., 1.])
#printDataSet(trndata, 2)

'''Network setup / training'''
rnn = buildNetwork(trndata.indim, 7, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)
trainer = RPropMinusTrainer(rnn, dataset=trndata, verbose=True)
#trainer = BackpropTrainer(rnn, dataset=trndata, verbose=True, momentum=0.9, learningrate=0.5)
trainError = []
testError = []
#errors = trainer.trainUntilConvergence()
for i in range(9):
    trainer.trainEpochs(2)
    trainError.append(100. * (1.0 - testOnSequenceData(rnn, trndata)))
    testError.append(100. * (1.0 - testOnSequenceData(rnn, tstdata)))
    print "train error: %5.2f%%" % trainError[i], ", test error: %5.2f%%" % testError[i]

plot(trainError)
hold(True)
plot(testError)
show()
I fail to train this net. The errors fluctuate a lot and there is no real convergence. I would really appreciate some advice on this.
Here is the code I'm using to generate the Reber strings:
#!/usr/bin/python
import random as rnd

class ReberGrammarLexicon(object):

    lexicon = set()  # contains the Reber words
    graph = [[(1, 'T'), (5, 'P')],
             [(1, 'S'), (2, 'X')],
             [(3, 'S'), (5, 'X')],
             [(6, 'E')],
             [(3, 'V'), (2, 'P')],
             [(4, 'V'), (5, 'T')]]  # stores the graph

    def __init__(self, num, maxSize=1000):  # fill the lexicon with num words
        self.maxSize = maxSize
        if maxSize < 5:
            raise NameError('maxSize too small, require maxSize > 4')
        while len(self.lexicon) < num:
            word = self.generateWord()
            if word != None:
                self.lexicon.add(word)

    def generateWord(self):  # generate one word
        c = 2
        currentEdge = 0
        word = 'B'
        while c <= self.maxSize:
            inc = rnd.randint(0, len(self.graph[currentEdge])-1)
            nextEdge = self.graph[currentEdge][inc][0]
            word += self.graph[currentEdge][inc][1]
            currentEdge = nextEdge
            if currentEdge == 6:
                break
            c += 1
        if c > self.maxSize:
            return None
        return word
Thanks,
Best