I'm trying to implement this paper (https://arxiv.org/pdf/1804.06962.pdf) with Lua/Torch7.
The forward pass runs without problems, but the backward pass, modele.gapbranch:backward(n, loss_grad), fails with this error:
/home/narimene/distro/install/bin/luajit:
...e/narimene/distro/install/share/lua/5.1/nn/Container.lua:67: In 2 module of nn.Sequential:
/home/narimene/distro/install/share/lua/5.1/nn/Concat.lua:92: bad argument #1 to 'narrow' (number expected, got nil)
stack traceback:
[C]: in function 'narrow'
/home/narimene/distro/install/share/lua/5.1/nn/Concat.lua:92: in function </home/narimene/distro/install/share/lua/5.1/nn/Concat.lua:47>
[C]: in function 'xpcall'
...e/narimene/distro/install/share/lua/5.1/nn/Container.lua:63: in function 'rethrowErrors'
.../narimene/distro/install/share/lua/5.1/nn/Sequential.lua:84: in function 'backward'
gap2.lua:240: in function 'opfunc'
/home/narimene/distro/install/share/lua/5.1/optim/sgd.lua:44: in function 'sgd'
gap2.lua:247: in main chunk
[C]: in function 'dofile'
...ene/distro/install/lib/luarocks/rocks/trepl/scm-1/bin/th:150: in main chunk
[C]: at 0x563fabe66570
WARNING: If you see a stack trace below, it doesn't point to the place
where this error occurred. Please use only the one above.
stack traceback:
[C]: in function 'error'
...e/narimene/distro/install/share/lua/5.1/nn/Container.lua:67: in function 'rethrowErrors'
.../narimene/distro/install/share/lua/5.1/nn/Sequential.lua:84: in function 'backward'
gap2.lua:240: in function 'opfunc'
/home/narimene/distro/install/share/lua/5.1/optim/sgd.lua:44: in function 'sgd'
gap2.lua:247: in main chunk
[C]: in function 'dofile'
...ene/distro/install/lib/luarocks/rocks/trepl/scm-1/bin/th:150: in main chunk
[C]: at 0x563fabe66570
Here is the code (gap2.lua):
require 'nn'
require 'cunn'
require 'cutorch'
local GapBranch, Parent = torch.class('nn.GapBranch', 'nn.Module')
function GapBranch:__init(label, num_classes, args, threshold)
Parent.__init(self)
self.gt_labels = label
num_classes = num_classes ~= nil and num_classes or 10
self.threshold = threshold or 0.6
self.gapbranch = nn.Sequential()
self.gapbranch:add(nn.SpatialConvolution(3,512, 3, 3, 1, 1, 1, 1)) -- this line is to be removed
self.cls = self:classifier(512, num_classes)
self.cls_erase = self:classifier(512, num_classes)
self.gapbranch:add(nn.Concat():add(self.cls):add(self.cls_erase))
--self.gapbranch:add(self.cls_erase)
--Optimizer
self.loss_cross_entropy = nn.CrossEntropyCriterion():cuda()
end
function GapBranch:classifier(in_planes, out_planes)
gapcnn = nn.Sequential()
gapcnn:add(nn.SpatialConvolution(in_planes, 1024, 3, 3, 1, 1, 1, 1))
gapcnn:add(nn.ReLU())
gapcnn:add(nn.SpatialConvolution(1024, 1024, 3, 3, 1, 1, 1, 1))
gapcnn:add(nn.ReLU())
gapcnn:add(nn.SpatialConvolution(1024,out_planes, 1, 1, 1,1))
return gapcnn
end
function mulTensor(tensor1, tensor2)
newTensor = torch.Tensor(tensor1:size()):cuda()
for i=1, tensor1:size()[1] do
for j=1, tensor1:size()[2] do
newTensor[{i,j}] = torch.cmul(tensor1[{i,j}],tensor2[{i,1}])
end
end
return newTensor
end
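-- A hypothetical vectorized variant (a sketch, not part of the original script):
-- the nested loops above can be replaced by expanding the one-channel mask
-- across the channel dimension and doing a single element-wise multiply.
function mulTensorFast(tensor1, tensor2)
    -- tensor1: (N, C, H, W) feature maps; tensor2: (N, 1, H, W) mask
    return torch.cmul(tensor1, tensor2:expandAs(tensor1))
end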
function GapBranch:erase_feature_maps(atten_map_normed, feature_maps, threshold)
if #atten_map_normed:size()>3 then
atten_map_normed = torch.squeeze(atten_map_normed)
end
atten_shape = atten_map_normed:size()
pos = torch.ge(atten_map_normed, threshold)
mask = torch.ones(atten_shape):cuda() -- cuda
mask[pos] = 0.0
m = nn.Unsqueeze(2)
m = m:cuda()
mask = m:forward(mask)
erased_feature_maps = mulTensor(feature_maps,mask) -- Variable
return erased_feature_maps
end
function GapBranch:normalize_atten_maps(atten_map)
atten_shape = atten_map:size()
batch_mins, _ = torch.min(atten_map:view(atten_shape[1],-1),2)
batch_maxs, _ = torch.max(atten_map:view(atten_shape[1],-1),2)
atten_normed = torch.cdiv(atten_map:view(atten_shape[1],-1)-batch_mins:expandAs(atten_map:view(atten_shape[1],-1)), (batch_maxs - batch_mins):expandAs(atten_map:view(atten_shape[1],-1)))
atten_normed = atten_normed:view(atten_shape)
return atten_normed
end
function GapBranch:get_atten_map(feature_maps, gt_labels, normalize)
normalize = normalize or true
label = gt_labels:long()
feature_map_size = feature_maps:size()
batch_size = feature_map_size[1]
atten_map = torch.zeros(feature_map_size[1], feature_map_size[3], feature_map_size[4])
atten_map = atten_map:cuda()
for batch_idx = 1, batch_size do
-- label.data[batch_idx]
--label[batch_idx]
print('label ',label:size())
print('feature_maps ', feature_maps:size())
atten_map[{batch_idx}] = torch.squeeze(feature_maps[{batch_idx,label[batch_idx]}])
end
if normalize then
atten_map = self:normalize_atten_maps(atten_map)
end
return atten_map
end
function GapBranch:gaplayer()
gaplayer = nn.Sequential()
gaplayer:add(nn.SpatialZeroPadding(1, 1, 1 ,1))
gaplayer:add(nn.SpatialAveragePooling(3, 3, 1, 1))
return gaplayer
end
function GapBranch:updateOutput(input) -- need label
-- Backbone
feat = self.gapbranch:get(1):forward(input)
self.gap = self:gaplayer()
self.gap:cuda()
feat3 = self.gap:forward(feat)
m = nn.Unsqueeze(2)
m = m:cuda()
-- Branch A
out = self.gapbranch:get(2):get(1):forward(feat3)
self.map1 = out
logits_1 = torch.squeeze(torch.mean(torch.mean(out, 3), 4))
logits_1 = m:forward(logits_1)
print('logits_1 ',logits_1:size())
--feat5 = self.gapbranch:get(2):get(2):forward(feat3)
localization_map_normed = self:get_atten_map(out, self.gt_labels, true)
self.attention = localization_map_normed
feat_erase = self:erase_feature_maps(localization_map_normed, feat3, self.threshold)
-- Branch B
out_erase = self.gapbranch:get(2):get(2):forward(feat_erase)
self.map_erase = out_erase
logits_ers = torch.squeeze(torch.mean(torch.mean(out_erase, 3), 4))
m = nn.Unsqueeze(2)
m = m:cuda()
logits_ers = m:forward(logits_ers)
print('logits_ers ', logits_ers:size())
return {logits_1, logits_ers}
end
function GapBranch:get_loss(resModele, gt_labels)
--[[ if self.onehot == 'True' then
gt = gt_labels:float()
else
gt = gt_labels:long()
end
--]]
print('resModele ', resModele[1])
loss_cls = self.loss_cross_entropy:forward(resModele[1], gt_labels)
loss_cls_ers = self.loss_cross_entropy:forward(resModele[2], gt_labels)
loss_val = loss_cls + loss_cls_ers
return {loss_val, }
end
require 'paths'
if (not paths.filep("cifar10torchsmall.zip")) then
os.execute('wget -c https://s3.amazonaws.com/torch7/data/cifar10torchsmall.zip')
os.execute('unzip cifar10torchsmall.zip')
end
trainset = torch.load('cifar10-train.t7')
testset = torch.load('cifar10-test.t7')
classes = {'airplane', 'automobile', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
-- ignore setmetatable for now, it is a feature beyond the scope of this tutorial. It sets the index operator.
setmetatable(trainset,
{__index = function(t, i)
return {t.data[i], t.label[i]}
end}
);
trainset.data = trainset.data:double() -- convert the data from a ByteTensor to a DoubleTensor.
function trainset:size()
return self.data:size(1)
end
mean = {} -- store the mean, to normalize the test set in the future
stdv = {} -- store the standard-deviation for the future
for i=1,3 do -- over each image channel
mean[i] = trainset.data[{ {}, {i}, {}, {} }]:mean() -- mean estimation
print('Channel ' .. i .. ', Mean: ' .. mean[i])
trainset.data[{ {}, {i}, {}, {} }]:add(-mean[i]) -- mean subtraction
stdv[i] = trainset.data[{ {}, {i}, {}, {} }]:std() -- std estimation
print('Channel ' .. i .. ', Standard Deviation: ' .. stdv[i])
trainset.data[{ {}, {i}, {}, {} }]:div(stdv[i]) -- std scaling
end
trainset.data = trainset.data:cuda()
trainset.label = trainset.label:cuda()
modele = nn.GapBranch(trainset.label):cuda()
modele.gapbranch = modele.gapbranch:cuda()
print(modele.gapbranch)
theta, gradTheta = modele.gapbranch:getParameters()
optimState = {learningRate = 0.15}
require 'optim'
for epoch = 1, 1 do
function feval(theta)
for i=1, 1 do
modele.gapbranch:zeroGradParameters()
m = nn.Unsqueeze(1)
m = m:cuda()
n = m:forward(trainset.data[i])
h = modele:forward(n)
j = modele:get_loss(h,trainset.label[i])
loss_cls_grad = modele.loss_cross_entropy:backward(h[1],trainset.label[i])
loss_cls_ers_grad = modele.loss_cross_entropy:backward(h[2],trainset.label[i])
loss_grad = loss_cls_grad + loss_cls_ers_grad
loss_grad = torch.randn(1,10,32,32):cuda()
modele.gapbranch:backward(n, loss_grad)
end
return j, gradTheta
end
print('***************************')
optim.sgd(feval, theta, optimState)
end
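For reference, nn.Concat is constructed with the dimension along which the branch outputs are joined (nn.Concat(dim)), and the bare nn.Concat() call above would leave that dimension nil, which matches the narrow error. A minimal sketch of the construction I believe is intended, assuming the two classifier heads are meant to be concatenated along the channel dimension (dimension 2 of a batch x channels x height x width tensor):
local concat = nn.Concat(2) -- 2 = channel axis of batched 4D feature maps
concat:add(self.cls)
concat:add(self.cls_erase)
self.gapbranch:add(concat)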
If anyone could help, I would be very grateful.
Related
I was playing around with Lua and neural networks, and I stumbled across a weird bug in my code, where Lua appears to be treating a table as a number. I have the following code...
function CreateEmptyNeuron()
local neuron = {
val = 0,
forward = { },
backward = { },
}
return neuron;
end
--layerSize is an integer representing the size of the layer we're creating.
function CreateLayer(layerSize)
local layer = {}
for i = 1, layerSize, 1 do
local n = CreateEmptyNeuron();
table.insert(layer, n)
end
return layer;
end
--layerSize is actually an array of integers representing the array sizes of
--each individual layer (first layer is the input layer, last layer is output layer)
function CreateLayers(numLayers, layerSize)
local layers = {}
for i = 1, numLayers, 1 do
local layer = CreateLayer(layerSize[i]);
table.insert(layers, layer)
end
return layers;
end
--This function initializes the "val" variable in each table, and
--forward connects each neuron to every node in the next layer...
function ForwardConnectLayers(network)
for i = 1, #network, 1 do
local layer = network[i]
local next_layer = nil
if (i+1) < #network then
next_layer = network[i+1]
else
print("We have reached the output layer...")
next_layer = nil
end
for j = 1, #layer, 1 do
local neuron = layer[j]
neuron.val = (math.random() + math.random(0, 100))/100;
if next_layer ~= nil then
for x = 1, #next_layer, 1 do
neuron.forward[x] = math.random(1, 100)/100
print("Found forward layer...")
end
else
print("We cannot forward connect the output layer...\n")
end
end
end
end
function BackwardConnectLayers(network)
for i = 1, #network, 1 do
local layer = network[i]
local prev_layer = nil
if (i-1) > 1 then
prev_layer = network[i-1]
else
print("We have reached the input layer...")
prev_layer = nil
end
for j = #layer, 1, -1 do
local neuron = layer[j]
--neuron.val = (math.random() + math.random(0, 100))/100;
if prev_layer ~= nil then
for x = 1, #prev_layer, 1 do
table.insert(neuron.backward, prev_layer[x])
print("Found input layer...")
end
else
print("We cannot backward connect the input layer...\n")
end
end
end
end
function CreateBrain()
local LAYER_SIZES = {
10, 20, 20, 5
}
local brain = CreateLayers(4, LAYER_SIZES)
ForwardConnectLayers(brain)
BackwardConnectLayers(brain)
return brain;
end
AI = CreateBrain();
AI.Run = function(inputs, expectedOutputs)
local input_layer = AI[1]
local output_layer = AI[#AI]
for i = 0, #inputs, 1 do
input_layer[i] = inputs[i]
end
--For each layer in the network...
for l = 1, #AI, 1 do
--Get the next layer and this layer...
local this_layer = AI[l]
local next_layer = AI[l+1]
--And for each neuron in the next layer...
--Multiply the value of the neuron in this layer by
--the value of the modifier, and set the value of the next
--Neuron to be nextneuron.val + sum(<x>.val*<x>[k])
if next_layer ~= nil then
for m = 1, #next_layer, 1 do
local prev_layer_sum = 0
for n = 1, #this_layer, 1 do
local neuron = this_layer[n]
print(neuron)
end
end
end
end
end
ai_inputs = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }
expected_outputs = { 8, 8, 8, 8, 8 }
AI.Run(ai_inputs, expected_outputs)
When I run the code as shown, it prints, as expected, a list of memory addresses of Lua tables:
But then, if I alter AI.Run to the following:
AI.Run = function(inputs, expectedOutputs)
local input_layer = AI[1]
local output_layer = AI[#AI]
for i = 0, #inputs, 1 do
input_layer[i] = inputs[i]
end
--For each layer in the network...
for l = 1, #AI, 1 do
--Get the next layer and this layer...
local this_layer = AI[l]
local next_layer = AI[l+1]
--And for each neuron in the next layer...
--Multiply the value of the neuron in this layer by
--the value of the modifier, and set the value of the next
--Neuron to be nextneuron.val + sum(<x>.val*<x>[k])
if next_layer ~= nil then
for m = 1, #next_layer, 1 do
local prev_layer_sum = 0
for n = 1, #this_layer, 1 do
local neuron = this_layer[n]
for k, v in pairs(neuron) do
print(k .. ", " .. v)
end
end
end
end
end
end
I get the following error message:
Indicating that Lua thinks the table is a number... which is, suffice it to say, confusing. There's probably some language technicality behind this behavior, but I can't find good documentation on it (in part because I don't even know what term to search for; I'm not usually a Lua programmer).
EDIT:
So, it appears, somewhere in the code, numbers are getting inserted into the layers...
(Link to online Lua compiler where you can run the code: http://tpcg.io/_WPRPQV)
Your AI.Run function does
local input_layer = AI[1]
for i = 0, #inputs, 1 do
input_layer[i] = inputs[i]
end
where inputs is a table of numbers, i.e. the first layer of your network is replaced by just numbers.
You probably want to replace the val of the neuron in that layer instead:
local input_layer = AI[1]
for i = 0, #inputs, 1 do
input_layer[i].val = inputs[i]
end
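As a side note beyond that fix: Lua sequences are 1-based, and the input layer's neurons live at indices 1..#layer, so the copy loop should start at 1 (input_layer[0] does not exist, and indexing it would fail). A small sketch with a sanity check:
local input_layer = AI[1]
for i = 1, #inputs do -- 1-based: slot 0 is not part of the layer
    input_layer[i].val = inputs[i]
end
-- every slot should still be a neuron table, never a raw number
for i = 1, #input_layer do
    assert(type(input_layer[i]) == "table", "slot " .. i .. " is not a neuron")
end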
When using the ReLU activation function in my neural network, I noticed all the outputs were the same. The sigmoid activation function works just fine. Is there something wrong with the ReLU function? Why is this happening, and what can I do to fix it?
The example is a simple XOR Neural Network written in Lua.
math.randomseed(os.time())
local nn = require("NeuralNetwork")
network = nn.new(2,1,1,4,0.2, "ReLU")
local x=0
local attempts = 100000
for i = 1,attempts do
x+=1
if x > 10000 then
wait()
x=0
end
network:backPropagate({0, 0}, {0})
network:backPropagate({1, 0}, {1})
network:backPropagate({0, 1}, {1})
network:backPropagate({1, 1}, {0})
end
print("0 0 | "..network:feedForward({0,0})[1])
print("1 0 | "..network:feedForward({1,0})[1])
print("0 1 | "..network:feedForward({0,1})[1])
print("1 1 | "..network:feedForward({1,1})[1])
Output:
0 0 | 0.48780487804878037
1 0 | 0.48780487804878037
0 1 | 0.48780487804878037
1 1 | 0.48780487804878037
- Library -
local nn = {}
nn.__index = nn
nn.ActivationFunctions = {
sigmoid = function(x) return 1/(1+math.exp(-x)) end,
ReLU = function(x) return math.max(0, x) end,
}
nn.Derivatives = {
sigmoid = function(x) return x * (1 - x) end,
ReLU = function(x) return x >= 0 and 1 or 0 end,
}
nn.CostFunctions = {
MSE = function(outputs, expected)
local sum = 0
for i = 1, #outputs do
sum += (expected[i] - outputs[i])^2
end
return sum/#outputs
end,
}
function nn.new(inputs, outputs, hiddenLayers, neurons, learningRate, activationFunction)
local self = setmetatable({}, nn)
self.learningRate = learningRate or .3
self.activationFunction = activationFunction or "ReLu"
self.net = {}
local net = self.net
local layers = hiddenLayers+2
for i = 1, layers do
net[i] = {}
end
for i = 1, inputs do
net[1][i] = {value = 0}
end
for i = 2, layers-1 do
for x = 1, neurons do
net[i][x] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
for z = 1, #net[i-1] do
net[i][x].weights[z] = math.random()*2-1
end
end
end
for i = 1, outputs do
net[layers][i] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
for z = 1, #net[layers-1] do
net[layers][i].weights[z] = math.random()*2-1
end
end
return self
end
function nn.newFromRawData(data)
return setmetatable(data, nn)
end
function nn:feedForward(inputs)
local net = self.net
local activation = self.activationFunction
local layers = #net
local inputLayer = net[1]
local outputLayer = net[layers]
for i = 1, #inputLayer do
inputLayer[i].value = inputs[i]
end
for i = 2, layers do
local layer = net[i]
for x = 1, #layer do
local sum = layer[x].bias
for z = 1, #net[i-1] do
sum += net[i-1][z].value * layer[x].weights[z]
end
layer[x].netInput = sum
layer[x].value = nn.ActivationFunctions[activation](sum)
end
end
local outputs = {}
for i = 1, #outputLayer do
table.insert(outputs, outputLayer[i].value)
end
return outputs
end
function nn:backPropagate(inputs, expected)
local outputs = self:feedForward(inputs)
--print(outputs)
local net = self.net
local activation = self.activationFunction
local layers = #net
local lr = self.learningRate
local inputLayer = net[1]
local outputLayer = net[layers]
for i = 1, #outputLayer do
local delta = -(expected[i] - outputs[i]) * nn.Derivatives[activation](outputs[i])
outputLayer[i].delta = delta
end
for i = layers-1, 2, -1 do
local layer = net[i]
local nextLayer = net[i+1]
for x = 1, #layer do
local delta = 0
for z = 1, #nextLayer do
delta += nextLayer[z].delta * nextLayer[z].weights[x]
end
layer[x].delta = delta * nn.Derivatives[activation](layer[x].value)
end
end
for i = 2, layers do
local lastLayer = net[i-1]
for x = 1, #net[i] do
net[i][x].bias -= lr * net[i][x].delta
for z = 1, #lastLayer do
net[i][x].weights[z] -= lr * net[i][x].delta * lastLayer[z].value
end
end
end
end
return nn
I'm trying to train a feed-forward neural network for the first time in Torch. Here's my dataset: http://ocw.mit.edu/courses/sloan-school-of-management/15-097-prediction-machine-learning-and-statistics-spring-2012/datasets/transfusion.csv
Here's the code (based on http://mdtux89.github.io/2015/12/11/torch-tutorial.html):
require 'nn'
mlp = nn.Sequential()
inputSize = 4
hiddenLayer1Size = 4
hiddenLayer2Size = 4
mlp:add(nn.Linear(inputSize,hiddenLayer1Size)) -- row, column
mlp:add(nn.Tanh())
mlp:add(nn.Linear(hiddenLayer1Size,hiddenLayer2Size))
mlp:add(nn.Tanh())
nclasses = 1
mlp:add(nn.Linear(hiddenLayer2Size,nclasses))
mlp:add(nn.LogSoftMax())
output = mlp:forward(torch.rand(1,4))
print(output)
-- TRAINING using inbuilt stochastic gradient descent, 2 params: network, criterion function. --
LRate = 0.1
criterion = nn.ClassNLLCriterion()
trainer = nn.StochasticGradient(mlp, criterion)
trainer.learningRate = LRate
function string:splitAtCommas()
local sep, values = ",", {}
local pattern = string.format("([^%s]+)", sep)
self:gsub(pattern, function(c) values[#values+1] = c end)
return values
end
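-- usage sketch: ("1,2,3"):splitAtCommas() --> {"1", "2", "3"}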
function loadData(dataFile)
local dataset,i = {},0
for line in io.lines(dataFile) do
local values = line:splitAtCommas()
local y = torch.Tensor(1)
y[1] = values[#values] -- the target class is the last number in the line
values[#values] = nil
local x = torch.Tensor(values) -- the input data is all the other numbers
dataset[i] = {x, y}
i = i + 1
end
function dataset:size() return (i - 1) end -- the size() method StochasticGradient requires
return dataset
end
dataset = loadData("transfusion.csv")
trainer:train(dataset)
Here's the error report:
# StochasticGradient: training
/Users/drdre/torch/install/share/lua/5.1/nn/THNN.lua:109: Assertion `cur_target >= 0 && cur_target < n_classes' failed. at /Users/drdre/torch/extra/nn/lib/THNN/generic/ClassNLLCriterion.c:38
stack traceback:
[C]: in function 'v'
/Users/drdre/torch/install/share/lua/5.1/nn/THNN.lua:109: in function 'ClassNLLCriterion_updateOutput'
...dre/torch/install/share/lua/5.1/nn/ClassNLLCriterion.lua:41: in function 'forward'
...re/torch/install/share/lua/5.1/nn/StochasticGradient.lua:35: in function 'f'
[string "local f = function() return trainer:train(dat..."]:1: in main chunk
[C]: in function 'xpcall'
/Users/drdre/torch/install/share/lua/5.1/itorch/main.lua:209: in function </Users/drdre/torch/install/share/lua/5.1/itorch/main.lua:173>
/Users/drdre/torch/install/share/lua/5.1/lzmq/poller.lua:75: in function 'poll'
/Users/drdre/torch/install/share/lua/5.1/lzmq/impl/loop.lua:307: in function 'poll'
/Users/drdre/torch/install/share/lua/5.1/lzmq/impl/loop.lua:325: in function 'sleep_ex'
/Users/drdre/torch/install/share/lua/5.1/lzmq/impl/loop.lua:370: in function 'start'
/Users/drdre/torch/install/share/lua/5.1/itorch/main.lua:381: in main chunk
[C]: in function 'require'
(command line):1: in main chunk
[C]: at 0x0105e4cd10
Use nclasses = 2 and y[1] = values[#values] + 1. See the doc:
a desired output y (an integer 1 to n, in this case n = 2 classes)
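A minimal sketch of the corresponding changes, assuming the last CSV column holds 0/1 labels (ClassNLLCriterion expects integer class targets in 1..n):
nclasses = 2 -- two classes; the 0/1 labels in the file become 1/2
mlp:add(nn.Linear(hiddenLayer2Size,nclasses))
mlp:add(nn.LogSoftMax())
-- in loadData, shift each 0/1 target into the 1..nclasses range:
y[1] = values[#values] + 1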
require 'torch';
require 'nn';
require 'nnx';
mnist = require 'mnist';
fullset = mnist.traindataset()
testset = mnist.testdataset()
trainset = {
size = 50000,
data = fullset.data[{{1,50000}}]:double(),
label = fullset.label[{{1,50000}}]
}
validationset = {
size = 10000,
data = fullset.data[{{50001, 60000}}]:double(),
label = fullset.label[{{50001,60000}}]
}
-- MNIST Dataset has 28x28 images
model = nn.Sequential()
model:add(nn.SpatialConvolutionMM(1, 32, 5, 5)) -- 32x24x24
model:add(nn.ReLU())
model:add(nn.SpatialMaxPooling(3, 3, 3, 3)) -- 32x8x8
model:add(nn.SpatialConvolutionMM(32, 64, 5, 5)) -- 64x4x4
model:add(nn.Tanh())
model:add(nn.SpatialMaxPooling(2, 2, 2, 2)) -- 64x2x2
model:add(nn.Reshape(64*2*2))
model:add(nn.Linear(64*2*2, 200))
model:add(nn.Tanh())
model:add(nn.Linear(200, 10))
model:add(nn.LogSoftMax())
criterion = nn.ClassNLLCriterion()
x, dldx = model:getParameters() -- now x stores the trainable parameters and dldx stores the gradient wrt these params in the model above
sgd_params = {
learningRate = 1e-2,
learningRateDecay = 1e-4,
weightDecay = 1e-3,
momentum = 1e-4
}
step = function ( batchsize )
-- setting up variables
local count = 0
local current_loss = 0
local shuffle = torch.randperm(trainset.size)
-- setting default batchsize as 200
batchsize = batchsize or 200
-- setting inputs and targets for minibatches
for minibatch_number = 1, trainset.size, batchsize do
local size = math.min( trainset.size - minibatch_number + 1, batchsize )
local inputs = torch.Tensor(size, 28, 28)
local targets = torch.Tensor(size)
for index = 1, size do
inputs[index] = trainset.data[ shuffle[ index + minibatch_number ]]
targets[index] = trainset.label[ shuffle[ index + minibatch_number ] ]
end
-- defining feval function to return loss and gradients of loss w.r.t. params
feval = function( x_new )
--print ( "---------------------------------safe--------------------")
if x ~= x_new then x:copy(x_new) end
-- initializing gradParsams to zero
dldx:zero()
-- calculating loss and param gradients
local loss = criterion:forward( model.forward( inputs ), targets )
model:backward( inputs, criterion:backward( model.output, targets ) )
return loss, dldx
end
-- getting loss
-- optim returns x*, {fx} where x* is new set of params and {fx} is { loss } => fs[ 1 ] carries loss from feval
print(feval ~= nil and x ~= nil and sgd_params ~= nil)
_,fs = optim.sgd(feval, x, sgd_params)
count = count + 1
current_loss = current_loss + fs[ 1 ]
end
--returning avg loss over the minibatch
return current_loss / count
end
max_iters = 30
for i = 1 ,max_iters do
local loss = step()
print(string.format('Epoch: %d Current loss: %4f', i, loss))
end
I am new to Torch and Lua and I'm not able to find the error in the above code. Can anyone suggest a way to debug it?
The error:
/home/afroz/torch/install/bin/luajit: /home/afroz/test.lua:88: attempt to index global 'optim' (a nil value)
stack traceback:
/home/afroz/test.lua:88: in function 'step'
/home/afroz/test.lua:102: in main chunk
[C]: in function 'dofile'
...froz/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:145: in main chunk
[C]: at 0x00406670
optim is not defined in the scope of your script. You try to call optim.sgd, which of course results in the error you see.
Like nn, optim is an extension package to Torch.
require 'torch';
require 'nn';
require 'nnx';
Remember those lines at the beginning of your script? They load the definitions of those packages.
Make sure optim is installed, then try to require it.
https://github.com/torch/optim
optim is not assigned anywhere in the script, so when the script references optim.sgd, its value is nil and you get the error shown. Double-check the script to make sure optim is assigned the correct value.
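A minimal sketch of the fix, adding the missing require alongside the others at the top of the script (this assumes the optim package is installed; it ships with the standard Torch distribution):
require 'torch';
require 'nn';
require 'nnx';
require 'optim'; -- defines the global optim table, making optim.sgd available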
I'm trying to edit a Lua script to add to my Garry's Mod server, but I get this error and I don't know what to do.
Error:
[ERROR] --/sh_worlditemspawner.lua:56: attempt to index local 'v' (a nil value)
Code:
local PLUGIN = PLUGIN
PLUGIN.name = "World Item Spawner"
PLUGIN.author = "Black Tea"
PLUGIN.desc = "World Item Spawner."
PLUGIN.itempoints = PLUGIN.itempoints or {}
PLUGIN.spawngroups = {
["default"] = {
{"bleach"},
},
["example"] = {
{"ration"},
},
["junks"] = {
{"junk_ws"},
{"junk_wj"},
{"junk_be"},
{"junk_bt"},
{"junk_p"},
{"junk_ss"},
{"junk_bl"},
{"junk_k"},
{"junk_p"},
{"junk_hp"},
{"junk_ec"},
{"junk_ej"},
}
}
PLUGIN.spawnrate = 30
PLUGIN.maxitems = 10
PLUGIN.itemsperspawn = 2
PLUGIN.spawneditems = PLUGIN.spawneditems or {}
if SERVER then
local spawntime = 1
function PLUGIN:Think()
if spawntime > CurTime() then return end
spawntime = CurTime() + self.spawnrate
for k, v in ipairs(self.spawneditems) do
if (!v:IsValid()) then
table.remove(self.spawneditems, k)
end
end
if #self.spawneditems >= self.maxitems then return end
for i = 1, self.itemsperspawn do
if #self.spawneditems >= self.maxitems then return end
local v = table.Random(self.itempoints)
if #self.spawneditems > self.maxitems then
return
end
local data = {}
data.start = v[1]
data.endpos = data.start + Vector(0, 0, 1)
data.filter = client
data.mins = Vector(-16, -16, 0)
data.maxs = Vector(16, 16, 16)
local trace = util.TraceHull(data)
if trace.Entity:IsValid() then
continue
end
local idat = table.Random(self.spawngroups[v[2]]) or self.spawngroup["default"]
local item = nut.item.Spawn(v[1] + Vector( math.Rand(-8,8), math.Rand(-8,8), 10 ), AngleRand(), idat[1], idat[2] or {})
table.insert( self.spawneditems, item )
end
end
function PLUGIN:LoadData()
self.itempoints = nut.util.ReadTable("itempoints")
end
function PLUGIN:SaveData()
for k, v in ipairs(self.spawneditems) do
v:Remove()
end
nut.util.WriteTable("itempoints", self.itempoints)
end
else
netstream.Hook("nut_DisplaySpawnPoints", function(data)
for k, v in pairs(data) do
local emitter = ParticleEmitter( v[1] )
local smoke = emitter:Add( "sprites/glow04_noz", v[1] )
smoke:SetVelocity( Vector( 0, 0, 1 ) )
smoke:SetDieTime(10)
smoke:SetStartAlpha(255)
smoke:SetEndAlpha(255)
smoke:SetStartSize(64)
smoke:SetEndSize(64)
smoke:SetColor(255,186,50)
smoke:SetAirResistance(300)
emitter:Finish()
end
end)
end
nut.command.Register({
adminOnly = true,
onRun = function(client, arguments)
local trace = client:GetEyeTraceNoCursor()
local hitpos = trace.HitPos + trace.HitNormal*5
local spawngroup = arguments[1] or "default"
table.insert( PLUGIN.itempoints, { hitpos, spawngroup } )
nut.util.Notify( "You added ".. spawngroup .. " item spawner." )
end
}, "itemspawnadd")
nut.command.Register({
adminOnly = true,
onRun = function(client, arguments)
local trace = client:GetEyeTraceNoCursor()
local hitpos = trace.HitPos + trace.HitNormal*5
local range = arguments[1] or 128
local mt = 0
for k, v in pairs( PLUGIN.itempoints ) do
local distance = v[1]:Distance( hitpos )
if distance <= tonumber(range) then
PLUGIN.itempoints[k] = nil
mt = mt + 1
end
end
nut.util.Notify( mt .. " item spawners has been removed.")
end
}, "itemspawnremove")
nut.command.Register({
adminOnly = true,
onRun = function(client, arguments)
if SERVER then
netstream.Start(client, "nut_DisplaySpawnPoints", PLUGIN.itempoints)
nut.util.Notify( "Displayed All Points for 10 secs." )
end
end
}, "itemspawndisplay")
This is because
table.Random(self.itempoints)
returns nil. Did you mean math.random? If you post the code for that table.Random function, I can give more info.
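Under that diagnosis, a minimal guard sketch (my addition, not part of the original plugin) that skips the spawn attempt while self.itempoints is still empty, instead of indexing a nil v:
local v = table.Random(self.itempoints)
if not v then return end -- no spawn points registered yet; nothing to index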