I am running 4 classifiers on a dataset and comparing their performance. When I run the following code I receive some weird errors:
library(mlbench)
library(mlr)
maxx_IL10 = c(3199, 2997, 2690, 2482, 2891, 2310, 3180, 3050, 3115, 3052, 3071, 3068, 2611, 2723, 2903, 2969)
auc_INTERLEUKIN_12P70 = c(14809, 1384.5, 760, 10922.5, 3010, 14564, 26496, 1229, 2509, 1474.5, 20697.5, 1854.5, 17352, 1457, 227, 31507.5)
maxx_TNFA = c(3066, 2658, 2697, 3175, 2904, 2260, 2714, 3056, 3155, 3030, 3125, 2456, 3017, 2860, 1704, 3167)
fold_IL4 = c(16.02685, 0, 4.616438, 53.44898, 0, 0, 68.85714, 5.833333, 25.9, 0, 21.87629, 20.57895, 20.18792, 4.394737, 7.723404, 56.6383)
maxx_CD19 = c(3045.5, 3045.5, 3045.5, 3045.5, 2667, 1865, 3126, 2432, 3244, 3218, 2415, 3077, 3223, 2549, 3016, 3244)
auc_IL4 = c(18315.5, 0, 1348, 31112, 0, 0, 19182.5, 525, 3201.5, 0, 12976, 782, 19195.5, 835, 544.5, 26658)
Class = c("B", "A", "A", "A", "A", "A", "B", "A", "B", "B", "B", "A", "B", "A", "A", "B")
df = data.frame(maxx_IL10, auc_INTERLEUKIN_12P70, maxx_TNFA, fold_IL4, maxx_CD19, auc_IL4, Class)
Class.task = makeClassifTask( data = df, target = "Class", positive ="B")
fv = generateFilterValuesData(Class.task, method = "mrmr")
plotFilterValues(fv)
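# keep only features whose mRMR score is at least the threshold (-0.2)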
filtered.task = filterFeatures(Class.task, fval = fv, threshold = -.2)
n = getTaskSize(filtered.task)
train.set = sample(n, size = round(2/3 * n))
test.set = setdiff(seq_len(n), train.set)
lrn1 = makeLearner("classif.lda", predict.type = "prob")
mod1 = train(lrn1, filtered.task, subset = train.set)
pred1 = predict(mod1, task = filtered.task, subset = test.set)
lrn2 = makeLearner("classif.ksvm", predict.type = "prob")
mod2 = train(lrn2, filtered.task, subset = train.set)
pred2 = predict(mod2, task = filtered.task, subset = test.set)
lrn3 = makeLearner("classif.randomForest", predict.type = "prob")
mod3 = train(lrn3, Class.task, subset = train.set)
pred3 = predict(mod3, task = Class.task, subset = test.set)
lrn5 = makeLearner("classif.xgboost", predict.type = "prob")
mod5 = train(lrn5, Class.task, subset = train.set)
pred5 = predict(mod5, task = Class.task, subset = test.set)
### Tune wrapper for ksvm
rdesc.inner = makeResampleDesc("Holdout")
ms = list(auc, mmce)
ps = makeParamSet(
makeDiscreteParam("C", 2^(-1:1))
)
ctrl = makeTuneControlGrid()
lrn2 = makeTuneWrapper(lrn2, rdesc.inner,ms, ps, ctrl, show.info = FALSE)
lrns = list(lrn1, lrn2,lrn3,lrn5)
rdesc.outer = makeResampleDesc("CV", iters = 5)
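# nested resampling: the tune wrapper's inner holdout runs inside each of the 5 outer CV folds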
bmr = benchmark(lrns, tasks = filtered.task, resampling = rdesc.outer, measures = ms, show.info = FALSE)
bmr
The errors that I receive are:
lrn2 = makeTuneWrapper(lrn2, rdesc.inner,ms, ps, ctrl, show.info = FALSE)
Error in makeTuneWrapper(lrn2, rdesc.inner, ms, ps, ctrl, show.info = FALSE) :
Assertion on 'measures' failed: May only contain the following types: Measure.
lrns = list(lrn1, lrn2,lrn3,lrn5)
rdesc.outer = makeResampleDesc("CV", iters = 5)
bmr = benchmark(lrns, tasks = filtered.task, resampling = rdesc.outer, measures = ms, show.info = FALSE)
Error in FUN(X[[i]], ...) :
List measures has element of wrong type function at position 1. Should be: Measure
bmr
Error: object 'bmr' not found
Any ideas on what I am doing wrong? Thank you!!
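One thing worth checking, though it is only a hypothesis from the error text: mlr's auc and mmce are Measure objects, but plain functions with the same names from other attached packages can mask them, which would produce exactly this "wrong type function" assertion. Qualifying them explicitly, e.g. ms = list(mlr::auc, mlr::mmce), rules that out.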
I've been trying to figure out what I have done wrong for many hours, but I just can't. I've even looked at other basic neural-network libraries to make sure my gradient-descent algorithm was correct, but it still isn't working.
I'm trying to teach it XOR, but it outputs:
input (0 0) | 0.011441891321516094
input (1 0) | 0.6558508610135193
input (0 1) | 0.6558003273099053
input (1 1) | 0.6563021185296245
after 1000 training iterations, so clearly there's something wrong.
The code is written in Lua, and I created the neural network from raw data so you can easily see how the data is formatted.
- Training code -
math.randomseed(os.time())
local nn = require("NeuralNetwork")
local network = nn.newFromRawData({
["activationFunction"] = "sigmoid",
["learningRate"] = 0.3,
["net"] = {
[1] = {
[1] = {
["value"] = 0
},
[2] = {
["value"] = 0
}
},
[2] = {
[1] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1
}
},
[2] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1
}
},
[3] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1
}
},
[4] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1
}
}
},
[3] = {
[1] = {
["bias"] = 1,
["netInput"] = 0,
["value"] = 0,
["weights"] = {
[1] = 1,
[2] = 1,
[3] = 1,
[4] = 1
}
}
}
}
})
attempts = 1000
for i = 1,attempts do
network:backPropagate({0,0},{0})
network:backPropagate({1,0},{1})
network:backPropagate({0,1},{1})
network:backPropagate({1,1},{0})
end
print("Results:")
print("input (0 0) | "..network:feedForward({0,0})[1])
print("input (1 0) | "..network:feedForward({1,0})[1])
print("input (0 1) | "..network:feedForward({0,1})[1])
print("input (1 1) | "..network:feedForward({1,1})[1])
- Library -
local nn = {}
nn.__index = nn
nn.ActivationFunctions = {
sigmoid = function(x) return 1/(1+math.exp(-x/1)) end,
ReLu = function(x) return math.max(0, x) end,
}
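-- note: these derivatives are written in terms of the neuron's activated output
-- (the 'value' that gets passed in), e.g. d/dx sigmoid(x) = y*(1-y) where y = sigmoid(x)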
nn.Derivatives = {
sigmoid = function(x) return x * (1 - x) end,
ReLu = function(x) return x > 0 and 1 or 0 end,
}
nn.CostFunctions = {
MSE = function(outputs, expected)
local sum = 0
for i = 1, #outputs do
sum = sum + 1/2*(expected[i] - outputs[i])^2
end
return sum/#outputs
end,
}
function nn.new(inputs, outputs, hiddenLayers, neurons, learningRate, activationFunction)
local self = setmetatable({}, nn)
self.learningRate = learningRate or .3
self.activationFunction = activationFunction or "ReLu"
self.net = {}
local net = self.net
local layers = hiddenLayers+2
for i = 1, layers do
net[i] = {}
end
for i = 1, inputs do
net[1][i] = {value = 0}
end
for i = 2, layers-1 do
for x = 1, neurons do
net[i][x] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
for z = 1, #net[i-1] do
net[i][x].weights[z] = math.random()*2-1
end
end
end
for i = 1, outputs do
net[layers][i] = {netInput = 0, value = 0, bias = math.random()*2-1, weights = {}}
for z = 1, #net[layers-1] do
net[layers][i].weights[z] = math.random()*2-1
end
end
return self
end
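-- note: unlike nn.new, newFromRawData uses the supplied weights verbatim and does not randomize them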
function nn.newFromRawData(data)
return setmetatable(data, nn)
end
function nn:feedForward(inputs)
local net = self.net
local activation = self.activationFunction
local layers = #net
local inputLayer = net[1]
local outputLayer = net[layers]
for i = 1, #inputLayer do
inputLayer[i].value = inputs[i]
end
for i = 2, layers do
local layer = net[i]
for x = 1, #layer do
local sum = layer[x].bias
for z = 1, #net[i-1] do
sum = sum + net[i-1][z].value * layer[x].weights[z]
end
layer[x].netInput = sum
layer[x].value = nn.ActivationFunctions[activation](sum)
end
end
local outputs = {}
for i = 1, #outputLayer do
table.insert(outputs, outputLayer[i].value)
end
return outputs
end
function nn:backPropagate(inputs, expected)
local outputs = self:feedForward(inputs)
local net = self.net
local activation = self.activationFunction
local layers = #net
local lr = self.learningRate
local inputLayer = net[1]
local outputLayer = net[layers]
for i = 1, #outputLayer do
local delta = -(expected[i] - outputs[i]) * nn.Derivatives[activation](net[layers][i].value)
outputLayer[i].delta = delta
end
for i = layers-1, 2, -1 do
local layer = net[i]
local nextLayer = net[i+1]
for x = 1, #layer do
local delta = 0
for z = 1, #nextLayer do
delta = delta + nextLayer[z].delta * nextLayer[z].weights[x]
end
layer[x].delta = delta * nn.Derivatives[activation](layer[x].value)
end
end
for i = 2, layers do
local lastLayer = net[i-1]
for x = 1, #net[i] do
net[i][x].bias = net[i][x].bias - lr * net[i][x].delta
for z = 1, #lastLayer do
net[i][x].weights[z] = net[i][x].weights[z] - lr * net[i][x].delta * lastLayer[z].value
end
end
end
end
return nn
Any help would be highly appreciated, thanks!
All initial weights must be DIFFERENT numbers, otherwise backpropagation will not work: neurons with identical weights receive identical gradient updates and can never diverge from one another. For example, you can replace each 1 with math.random() (or math.random()*2 - 1, the same range nn.new already uses).
Increase the number of attempts to 10000.
With these modifications, your code works fine:
Results:
input (0 0) | 0.028138230938126
input (1 0) | 0.97809448578087
input (0 1) | 0.97785000216126
input (1 1) | 0.023128477689456
I am trying to train a special type of GAN called a Model-Assisted GAN (https://arxiv.org/pdf/1812.00879) using Keras. It takes as input a vector of 13 input parameters plus Gaussian noise and generates a vector of 6 outputs. The mapping between the input and output vectors is non-trivial, since it comes from a high-energy-physics simulation with some inherent randomness; the first five inputs have the biggest influence on the final outputs. The biggest difference from a traditional GAN is the use of a Siamese network as the discriminator. It takes two inputs at a time, so for the same input parameters we provide two sets of possible outputs per training step (possible because of the randomness of the simulation). In that sense there are twelve output distributions, but only six are unique, and those six are what we aim to generate. Both the discriminator and the generator are 1D convolutional neural networks.
The current model reproduces the output distributions for an independent testing sample to reasonably good accuracy (see the overlaid histograms below), but there are still some clear differences between the distributions, and the eventual goal is for the model to produce data indistinguishable from the simulation. So far I have tried: varying the learning rate and adding varying amounts of learning-rate decay, tweaking the network architectures, changing some of the optimiser's hyperparameters, adding more noise to the discriminator training by implementing label smoothing and swapping the order of inputs, adding label smoothing to the generator training, increasing the batch size, and increasing the number of noise inputs. I still cannot get the model to reproduce the output distributions exactly, and I am struggling to come up with ideas for what to do next. Has anyone had a similar problem, where the output is not quite perfect, and if so, how did you go about solving it? Any thoughts or tips would be greatly appreciated!
I have included the full code for the training, as well as some plots of the input and output distributions (before applying the QuantileTransformer), the loss curves for the adversarial network and the discriminator (A for adversarial, S for Siamese discriminator), and the overlaid histograms of the generated and true output distributions for the independent testing sample (where the small differences are visible).
Thanks in advance.
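As a side note on measuring progress (a suggestion, not part of the original setup): rather than judging closeness only from overlaid histograms, the residual mismatch can be quantified per output dimension, so that each tweak is comparable numerically. A minimal sketch using a two-sample Kolmogorov-Smirnov test, assuming generated and true are hypothetical (N, 6) arrays holding the six DLL outputs:
import numpy as np
from scipy.stats import ks_2samp
def compare_distributions(generated, true, names=None):
    # run a two-sample KS test on each of the six output dimensions
    for i in range(generated.shape[1]):
        stat, p = ks_2samp(generated[:, i], true[:, i])
        label = names[i] if names is not None else "output %d" % i
        print("%s: KS statistic = %.4f, p-value = %.3g" % (label, stat, p))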
TRAINING CODE
"""
Training implementation
"""
net_range = [-1,1]
gauss_range = [-5.5,5.5]
mapping = interp1d(gauss_range, net_range)
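# note: the QuantileTransformer below maps each feature to an approximately standard
# normal distribution; interp1d then squeezes [-5.5, 5.5] linearly into [-1, 1], the
# range of the emulator's final tanh activation (+-5.5 comfortably covers the
# transformed values at this sample size)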
class ModelAssistedGANPID(object):
def __init__(self, params=64, observables=6):
self.params = params
self.observables = observables
self.Networks = Networks(params=params, observables=observables)
self.emulator = self.Networks.emulator()  # needed below: train() calls self.emulator.predict
self.siamese = self.Networks.siamese_model()
self.adversarial1 = self.Networks.adversarial1_model()
def train(self, pretrain_steps=4500, train_steps=100000, batch_size=32, train_no=1):
print('Pretraining for ', pretrain_steps,' steps before training for ', train_steps, ' steps')
print('Batch size = ', batch_size)
print('Training number = ', train_no)
'''
Data preparation (for the pre-training stage)
'''
# Number of tracks for the training + validation sample
n_events = 1728000 + 100000
n_train = n_events - 100000
# Parameters for Gaussian noise
lower = -1
upper = 1
mu = 0
sigma = 1
# import simulation data
print('Loading data...')
kaon_data = pd.read_hdf('PATH')
kaon_data = kaon_data.sample(n=n_events)
kaon_data = kaon_data.reset_index(drop=True)
kaon_data_train = kaon_data[:n_train]
kaon_data_test = kaon_data[n_train:n_events]
print("Producing training data...")
# select the 13 input features and the two sets of DLL outputs by column name
input_cols = ['TrackP', 'TrackPt', 'NumLongTracks', 'NumRich1Hits', 'NumRich2Hits',
'TrackRich1EntryX', 'TrackRich1EntryY', 'TrackRich1ExitX', 'TrackRich1ExitY',
'TrackRich2EntryX', 'TrackRich2EntryY', 'TrackRich2ExitX', 'TrackRich2ExitY']
dll_cols = ['RichDLLe', 'RichDLLmu', 'RichDLLk', 'RichDLLp', 'RichDLLd', 'RichDLLbt']
dll2_cols = ['RichDLLe2', 'RichDLLmu2', 'RichDLLk2', 'RichDLLp2', 'RichDLLd2', 'RichDLLbt2']
inputs_kaon_data_train = kaon_data_train[input_cols].to_numpy()
Dll_kaon_data_train = kaon_data_train[dll_cols].to_numpy()
Dll2_kaon_data_train = kaon_data_train[dll2_cols].to_numpy()
print('Transforming inputs and outputs using Quantile Transformer...')
# subsample is set far above the sample size, which effectively disables subsampling
scaler_inputs = QuantileTransformer(output_distribution='normal', n_quantiles=int(1e5), subsample=int(1e10)).fit(inputs_kaon_data_train)
scaler_Dll = QuantileTransformer(output_distribution='normal', n_quantiles=int(1e5), subsample=int(1e10)).fit(Dll_kaon_data_train)
scaler_Dll2 = QuantileTransformer(output_distribution='normal', n_quantiles=int(1e5), subsample=int(1e10)).fit(Dll2_kaon_data_train)
inputs_kaon_data_train = mapping(scaler_inputs.transform(inputs_kaon_data_train))
Dll_kaon_data_train = mapping(scaler_Dll.transform(Dll_kaon_data_train))
Dll2_kaon_data_train = mapping(scaler_Dll2.transform(Dll2_kaon_data_train))
# REPEATING FOR TESTING DATA
print("Producing testing data...")
# select the same input and output columns for the testing sample
inputs_kaon_data_test = kaon_data_test[input_cols].to_numpy()
Dll_kaon_data_test = kaon_data_test[dll_cols].to_numpy()
Dll2_kaon_data_test = kaon_data_test[dll2_cols].to_numpy()
print('Transforming inputs and outputs using Quantile Transformer...')
inputs_kaon_data_test = mapping(scaler_inputs.transform(inputs_kaon_data_test))
Dll_kaon_data_test = mapping(scaler_Dll.transform(Dll_kaon_data_test))
Dll2_kaon_data_test = mapping(scaler_Dll2.transform(Dll2_kaon_data_test))
# Producing testing data: the physics parameters occupy the first 13 noise slots
params_list_test = np.random.normal(loc=mu, scale=sigma, size=[len(kaon_data_test), self.params])
params_list_test[:, :13] = inputs_kaon_data_test
# observables reshaped to (n_test, observables, 1) for the Conv1D siamese inputs
obs_simu_1_test = Dll_kaon_data_test[:, :, np.newaxis]
obs_simu_2_test = Dll2_kaon_data_test[:, :, np.newaxis]
event_no_par = 0
event_no_obs_1 = 0
event_no_obs_2 = 0
d1_hist, d2_hist, d_hist, g_hist, a1_hist, a2_hist = list(), list(), list(), list(), list(), list()
print('Beginning pre-training...')
'''
Pre-training stage
'''
for train_step in range(pretrain_steps):
log_mesg = '%d' % train_step
noise_value = 0.3
params_list = np.random.normal(loc=mu,scale=sigma, size=[batch_size, self.params])
y_ones = np.ones([batch_size, 1])
y_zeros = np.zeros([batch_size, 1])
# add physics parameters + noise to params_list (the first 13 slots hold the physics inputs)
params_list[:, :13] = inputs_kaon_data_train[event_no_par:event_no_par + batch_size]
event_no_par += batch_size
# Step 1
# simulated observables (number 1), shaped (batch_size, observables, 1)
obs_simu_1 = Dll_kaon_data_train[event_no_obs_1:event_no_obs_1 + batch_size, :, np.newaxis]
event_no_obs_1 += batch_size
obs_simu_1_copy = np.copy(obs_simu_1)
# simulated observables (Gaussian smeared - number 2)
obs_simu_2 = Dll2_kaon_data_train[event_no_obs_2:event_no_obs_2 + batch_size, :, np.newaxis]
event_no_obs_2 += batch_size
obs_simu_2_copy = np.copy(obs_simu_2)
# emulated DLL values
obs_emul = self.emulator.predict(params_list)
obs_emul_copy = np.copy(obs_emul)
# decay the learn rate
if(train_step % 1000 == 0 and train_step>0):
siamese_lr = K.eval(self.siamese.optimizer.lr)
K.set_value(self.siamese.optimizer.lr, siamese_lr*0.7)
print('lr for Siamese network updated from %f to %f' % (siamese_lr, siamese_lr*0.7))
adversarial1_lr = K.eval(self.adversarial1.optimizer.lr)
K.set_value(self.adversarial1.optimizer.lr, adversarial1_lr*0.7)
print('lr for Adversarial1 network updated from %f to %f' % (adversarial1_lr, adversarial1_lr*0.7))
loss_simu_list = [obs_simu_1_copy, obs_simu_2_copy]
loss_fake_list = [obs_simu_1_copy, obs_emul_copy]
input_val = 0
# swap which inputs to give to Siamese network
if(np.random.random() < 0.5):
loss_simu_list[0], loss_simu_list[1] = loss_simu_list[1], loss_simu_list[0]
if(np.random.random() < 0.5):
loss_fake_list[0] = obs_simu_2_copy
input_val = 1
# one-sided label smoothing: soft targets drawn from [0.97, 1] and [0, 0.03]
y_ones = np.array([np.random.uniform(0.97, 1.00) for x in range(batch_size)]).reshape([batch_size, 1])
y_zeros = np.array([np.random.uniform(0.00, 0.03) for x in range(batch_size)]).reshape([batch_size, 1])
if(input_val == 0):
if np.random.random() < noise_value:
for b in range(batch_size):
if np.random.random() < noise_value:
obs_simu_1_copy[b], obs_simu_2_copy[b] = obs_simu_2[b], obs_simu_1[b]
obs_simu_1_copy[b], obs_emul_copy[b] = obs_emul[b], obs_simu_1[b]
if(input_val == 1):
if np.random.random() < noise_value:
for b in range(batch_size):
if np.random.random() < noise_value:
obs_simu_1_copy[b], obs_simu_2_copy[b] = obs_simu_2[b], obs_simu_1[b]
obs_simu_2_copy[b], obs_emul_copy[b] = obs_emul[b], obs_simu_2[b]
# train siamese
d_loss_simu = self.siamese.train_on_batch(loss_simu_list, y_ones)
d_loss_fake = self.siamese.train_on_batch(loss_fake_list, y_zeros)
d_loss = 0.5 * np.add(d_loss_simu, d_loss_fake)
log_mesg = '%s [S loss: %f]' % (log_mesg, d_loss[0])
#print(log_mesg)
#print('--------------------')
#noise_value*=0.999
#Step 2
# train emulator
a_loss_list = [obs_simu_1, params_list]
a_loss = self.adversarial1.train_on_batch(a_loss_list, y_ones)
log_mesg = '%s [E loss: %f]' % (log_mesg, a_loss[0])
print(log_mesg)
print('--------------------')
noise_value*=0.999
if __name__ == '__main__':
params_physics = 13
params_noise = 51 #51 looks ok, 61 is probably best, 100 also works
params = params_physics + params_noise
observables= 6
train_no = 1
magan = ModelAssistedGANPID(params=params, observables=observables)
magan.train(pretrain_steps=11001, train_steps=10000, batch_size=32, train_no=train_no)
NETWORKS
class Networks(object):
def __init__(self, noise_size=100, params=64, observables=5):
self.noise_size = noise_size
self.params = params
self.observables = observables
self.E = None # emulator
self.S = None # siamese
self.SM = None # siamese model
self.AM1 = None # adversarial model 1
'''
Emulator: generate identical observable parameters to those of the simulator S when both E and S are fed with the same input parameters
'''
def emulator(self):
if self.E:
return self.E
# input params
# the model takes as input an array of shape (*, self.params)
input_params_shape = (self.params,)
input_params_layer = Input(shape=input_params_shape, name='input_params')
# architecture
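# shape trace (assuming observables = 6): Dense -> (6*128,) -> reshape (6, 128)
# -> upsample (12, 128) -> Conv1D k=7 valid (6, 64) -> upsample (12, 64)
# -> Conv1D k=7 valid -> (6, 1), matching the (observables, 1) siamese input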
self.E = Dense(1024)(input_params_layer)
self.E = LeakyReLU(0.2)(self.E)
self.E = Dense(self.observables*128, kernel_initializer=initializers.RandomNormal(stddev=0.02))(self.E)
self.E = LeakyReLU(0.2)(self.E)
self.E = Reshape((self.observables, 128))(self.E)
self.E = UpSampling1D(size=2)(self.E)
self.E = Conv1D(64, kernel_size=7, padding='valid')(self.E)
self.E = LeakyReLU(0.2)(self.E)
self.E = UpSampling1D(size=2)(self.E)
self.E = Conv1D(1, kernel_size=7, padding='valid', activation='tanh')(self.E)
# model
self.E = Model(inputs=input_params_layer, outputs=self.E, name='emulator')
# print
print("Emulator")
self.E.summary()
return self.E
'''
Siamese: determine the similarity between output values produced by the simulator and emulator
'''
def siamese(self):
if self.S:
return self.S
# input DLL images
input_shape = (self.observables, 1)
input_layer_anchor = Input(shape=input_shape, name='input_layer_anchor')
input_layer_candid = Input(shape=input_shape, name='input_layer_candidate')
input_layer = Input(shape=input_shape, name='input_layer')
# siamese
cnn = Conv1D(64, kernel_size=8, strides=2, padding='same',
kernel_initializer=initializers.RandomNormal(stddev=0.02))(input_layer)
cnn = LeakyReLU(0.2)(cnn)
cnn = Conv1D(128, kernel_size=5, strides=2, padding='same')(cnn)
cnn = LeakyReLU(0.2)(cnn)
cnn = Flatten()(cnn)
cnn = Dense(128, activation='sigmoid')(cnn)
cnn = Model(inputs=input_layer, outputs=cnn, name='cnn')
# left and right encodings
encoded_l = cnn(input_layer_anchor)
encoded_r = cnn(input_layer_candid)
# merge two encoded inputs with the L1 or L2 distance between them
L1_distance = lambda x: K.abs(x[0]-x[1])
L2_distance = lambda x: (x[0]-x[1]+K.epsilon())**2/(x[0]+x[1]+K.epsilon())
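# note: only L2_distance is used below; despite the name it is a normalized
# (chi-squared-like) squared difference rather than a true Euclidean distance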
both = Lambda(L2_distance)([encoded_l, encoded_r])
prediction = Dense(1, activation='sigmoid')(both)
# model
self.S = Model([input_layer_anchor, input_layer_candid], outputs=prediction, name='siamese')
# print
print("Siamese:")
self.S.summary()
print("Siamese CNN:")
cnn.summary()
return self.S
'''
Siamese model
'''
def siamese_model(self):
if self.SM:
return self.SM
# optimizer
optimizer = Adam(lr=0.004, beta_1=0.5, beta_2=0.9)
# input DLL values
input_shape = (self.observables, 1)
input_layer_anchor = Input(shape=input_shape, name='input_layer_anchor')
input_layer_candid = Input(shape=input_shape, name='input_layer_candidate')
input_layer = [input_layer_anchor, input_layer_candid]
# discriminator
siamese_ref = self.siamese()
siamese_ref.trainable = True
self.SM = siamese_ref(input_layer)
# model
self.SM = Model(inputs=input_layer, outputs=self.SM, name='siamese_model')
self.SM.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=[metrics.binary_accuracy])
print("Siamese model")
self.SM.summary()
return self.SM
'''
Adversarial 1 model (adversarial pre-training phase) - this is where the emulator and siamese network are trained to enable the emulator to generate DLL values for a set of given physics inputs
'''
def adversarial1_model(self):
if self.AM1:
return self.AM1
optimizer = Adam(lr=0.0004, beta_1=0.5, beta_2=0.9)
# input 1: simulated DLL values
input_obs_shape = (self.observables, 1)
input_obs_layer = Input(shape=input_obs_shape, name='input_obs')
# input 2: params
input_params_shape = (self.params, )
input_params_layer = Input(shape=input_params_shape, name='input_params')
# emulator
emulator_ref = self.emulator()
emulator_ref.trainable = True
self.AM1 = emulator_ref(input_params_layer)
# siamese
siamese_ref = self.siamese()
siamese_ref.trainable = False
self.AM1 = siamese_ref([input_obs_layer, self.AM1])
# model
input_layer = [input_obs_layer, input_params_layer]
self.AM1 = Model(inputs=input_layer, outputs=self.AM1, name='adversarial_1_model')
self.AM1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=[metrics.binary_accuracy])
# print
print("Adversarial 1 model:")
self.AM1.summary()
return self.AM1
[Plot: input distributions]
[Plot: output distributions]
[Plot: loss curves for the adversarial (A) and Siamese (S) networks]
[Plot: generated output (orange) vs true output (blue) histograms]
I have these two data frames:
1) t1 (Var1 (numeric), Var2 (string), Freq (numeric))
2) pref.media (pref.media (numeric))
I have the two charts working separately, but I want to draw both series in the same chart.
1) Column chart (ok)
column1 <- highchart() %>%
hc_add_series(t1$Freq, type = "column") %>%
hc_xAxis(categories = t1$Var1) %>%
hc_plotOptions(series = list(showInLegend = FALSE,dataLabels = list(enabled = TRUE, color = t1$Var1)))
2) Solid gauge chart (ok)
gauge1 <- highchart(width = 800, height = 600) %>%
hc_chart(type = "solidgauge",backgroundColor = "#F0F0F0",marginTop = 50) %>%
hc_title(text = "Preferencia",style = list(fontSize = "24px")) %>%
hc_tooltip(borderWidth = 0, backgroundColor = 'none',shadow = FALSE,style = list(fontSize = '16px')) %>%
hc_pane(startAngle = -90,endAngle = 90) %>%
hc_yAxis(min = 0,max = 11,lineWidth = 0,tickPositions = list(0,1,2,3,4,5,6,7,8,9,10,11)) %>%
hc_plotOptions(solidgauge = list(borderWidth = '34px',dataLabels = list(enabled = TRUE, style = list(borderWidth = 3,backgroundColor = 'none',shadow = FALSE, fontSize = '16px',color="#888800")),linecap = 'round',stickyTracking = FALSE)) %>%
hc_add_series(name = "Preferencia",borderColor = "#000000",data = list(list(color = "#888800",radius = "100%",innerRadius = "100%",y = round(pref.media[[1,1]],2))))
3) Mixing the charts (not ok, with errors)
highchart() %>%
hc_add_series(t1, "column", hcaes(x = Var1, y = Freq), name = "Preferencia") %>%
hc_add_series(pref.media, "solidgauge", hcaes(name = "Gauge", y = round(pref.media[[1,1]],2)), name = "Indicador") %>%
hc_plotOptions(
series = list(showInLegend = FALSE,pointFormat = "{point.y}%"),
column = list(colorByPoint = TRUE),
solidgauge = list(borderWidth = '34px',dataLabels = list(enabled = TRUE),linecap = 'round',stickyTracking = FALSE)) %>%
hc_pane(startAngle = -90,endAngle = 90) %>%
hc_yAxis(title = list(text = "Porcentaje de preferencia"),labels = list(format = "{value}%"), max = 100) %>%
hc_xAxis(categories = t1$Var1) %>%
hc_title(text = "Título") %>%
hc_subtitle(text = "Subtítulo")
I don't understand the sequence for mixing two series of different types in the same chart. I tried adapting one of J. Kunst's examples from http://jkunst.com/highcharter/highcharts.html
Thanks
I need to check whether the first value is >= 'from' and the second value is <= 'to'; if so, my function returns 'number'. It works, but I don't know if this is the best and most optimized way to get the value (the number from the table).
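-- note: naming this local 'table' shadows Lua's standard table library (harmless here, but easy to trip over)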
local table = {
{from = -1, to = 12483, number = 0},
{from = 12484, to = 31211, number = 1},
{from = 31212, to = 53057, number = 2},
{from = 53058, to = 90200, number = 3},
{from = 90201, to = 153341, number = 4},
{from = 153342, to = 443162, number = 5},
{from = 443163, to = 753380, number = 6},
{from = 753381, to = 1280747, number = 7},
{from = 1280748, to = 2689570, number = 8},
{from = 2689571, to = 6723927, number = 9},
{from = 6723928, to = 6723928, number = 10}
}
local exampleFromValue = 31244
local exampleToValue = 42057
local function getNumber()
local number = 0
for k, v in pairs(table) do
if (v.from and exampleFromValue >= v.from) and (v.to and exampleToValue <= v.to) then
number = v.number
break
end
end
return number
end
print(getNumber())
With this small amount of data, such a function doesn't seem like a performance issue. However, you can compress the data a bit:
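-- each t[i] is the 'from' bound of band i; any range entirely below t[1] maps to band 0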
local t = {
12484, 31212, 53058, 90201, 153342, 443163, 753381, 1280748, 2689571, 6723928
}
local exampleFromValue = 31244
local exampleToValue = 42057
local function getNumber()
local last = -1
for i, v in ipairs(t) do
if exampleFromValue >= last and exampleToValue < v then
return i - 1
end
last = v
end
return 0
end
Trying to recreate the TensorFlow NMT example
https://github.com/tensorflow/nmt#bidirectional-rnns
with a baseline of just copying the original sentence instead of translating it, but I'm getting a weird bug in the inference part, where the only outputs are the same words repeated multiple times.
I'm using the TF attention wrapper and the GreedyEmbeddingHelper for inference.
Using TF 1.3 and Python 3.6, if that helps.
[Screenshot: the bugged predictions]
The weird thing is that during training the predictions are normal and the loss decreases to around 0.1. I have already checked the embeddings, and they do change from time step to time step, so I suspect this has something to do with the decoding stage, since it's the only part that really changes between training and inference.
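One hypothesis worth double-checking (an assumption on my part, not something visible in the output): the GreedyEmbeddingHelper below is seeded with EOS as the start token, whereas the NMT tutorial seeds inference with a dedicated start-of-sequence id. If the vocabulary has one, seeding with it instead would look like this:
SOS = 2  # hypothetical: whatever id your vocabulary assigns to the start token
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(decoder_embeddings, tf.fill([batch_size], SOS), EOS)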
# assumed imports and setup (not included in the original snippet)
import os
import tensorflow as tf
from tensorflow.python.layers import core as layers_core
mode = "training"  # assumption: set to "inference" for the prediction run
tf.reset_default_graph()
sess = tf.InteractiveSession()
PAD = 0
EOS = 1
max_gradient_norm = 1
learning_rate = 0.02
num_layers = 1
total_epoch = 2
sentence_length = 19
vocab_size = 26236
input_embedding_size = 128
if mode == "training":
batch_size = 100
isReused = None
else:
batch_size = 1
isReused = True
with tf.name_scope("encoder"):
encoder_embeddings = tf.get_variable('encoder_embeddings', [vocab_size, input_embedding_size], tf.float32,
tf.random_uniform_initializer(-1.0, 1.0))
encoder_hidden_units = 128
encoder_inputs = tf.placeholder(shape=(batch_size, None), dtype=tf.int32, name='encoder_inputs')
encoder_lengths = tf.placeholder(shape=batch_size, dtype=tf.int32, name='encoder_lengths')
encoder_cell = tf.contrib.rnn.BasicLSTMCell(encoder_hidden_units, state_is_tuple=True)
encoder_inputs_embedded = tf.nn.embedding_lookup(encoder_embeddings, encoder_inputs)
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(encoder_cell, encoder_inputs_embedded, dtype=tf.float32,
sequence_length=encoder_lengths, time_major=False)
# reuse must be passed by keyword: the second positional argument of variable_scope is default_name
with tf.variable_scope("decoder", reuse=isReused):
decoder_hidden_units = encoder_hidden_units
decoder_inputs = tf.placeholder(shape=(batch_size, None), dtype=tf.int32, name='decoder_inputs')
decoder_targets = tf.placeholder(shape=(batch_size, None), dtype=tf.int32, name='decoder_targets')
decoder_lengths = tf.placeholder(shape=batch_size, dtype=tf.int32, name="decoder_lengths")
decoder_embeddings = tf.get_variable('decoder_embeddings', [vocab_size, input_embedding_size], tf.float32,
tf.random_uniform_initializer(-1.0, 1.0))
decoder_inputs_embedded = tf.nn.embedding_lookup(decoder_embeddings, decoder_inputs)
decoder_cell = tf.contrib.rnn.BasicLSTMCell(decoder_hidden_units, state_is_tuple=True)
projection_layer = layers_core.Dense(vocab_size, use_bias=False)
attention_mechanism = tf.contrib.seq2seq.LuongAttention(encoder_hidden_units, encoder_outputs,
memory_sequence_length=encoder_lengths)
attn_decoder_cell = tf.contrib.seq2seq.AttentionWrapper(decoder_cell, attention_mechanism,
attention_layer_size=encoder_hidden_units)
if mode == "training":
helper = tf.contrib.seq2seq.TrainingHelper(decoder_inputs_embedded, decoder_lengths, time_major=False)
maximum_iterations = None
else:
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(decoder_embeddings, tf.fill([batch_size], EOS), EOS)
maximum_iterations = tf.round(tf.reduce_max(encoder_lengths) * 2)
# Decoder
init_state = attn_decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=encoder_state)
decoder = tf.contrib.seq2seq.BasicDecoder(attn_decoder_cell, helper, init_state, output_layer=projection_layer)
# Dynamic decoding
decoder_outputs, decoder_final_state, _ = tf.contrib.seq2seq.dynamic_decode(decoder, output_time_major=False,
swap_memory=True,
maximum_iterations=maximum_iterations)
decoder_logits = decoder_outputs.rnn_output
decoder_prediction = decoder_outputs.sample_id
if mode == "training":
with tf.name_scope("cross_entropy"):
labels = tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32)
decoder_crossent = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=decoder_logits)
with tf.name_scope("loss"):
target_weights = tf.sequence_mask(decoder_lengths, maxlen=20, dtype=decoder_logits.dtype)
train_loss = tf.reduce_sum(decoder_crossent * target_weights) / (batch_size * 20)
tf.summary.scalar('loss', train_loss)
with tf.name_scope("clip_gradients"):
params = tf.trainable_variables()
gradients = tf.gradients(train_loss, params)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, max_gradient_norm)
with tf.name_scope("Optimizer"):
optimizer = tf.train.AdamOptimizer(learning_rate)
update_step = optimizer.apply_gradients(zip(clipped_gradients, params))
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.getcwd() + '/train', sess.graph)
test_writer = tf.summary.FileWriter(os.getcwd() + '/test')
sess.run(tf.global_variables_initializer())