How to calculate unit variance in tensorflow?

My input is a set of images and I want to normalize them to zero mean and unit variance. But when I check the result with numpy, the variance should come out as 1 and it doesn't.
What am I doing wrong in my code?
def pre_processing(img_list, zero_mean=True, unit_var=True):
    with tf.device('/cpu:0'):
        tn_img0 = img_list[0][1]
        tn_img1 = img_list[1][1]
        t_img = tn_img0
        # t_img = tf.concat([tn_img0, tn_img1], axis=0)
        rgb_mean, rgb_var = tf.nn.moments(t_img, [0, 1])
        if zero_mean:
            tn_img0 = tf.subtract(img_list[0][1], rgb_mean)
            tn_img1 = tf.subtract(img_list[1][1], rgb_mean)
        if unit_var:
            tn_img0 = tf.divide(tn_img0, rgb_var)
            tn_img1 = tf.divide(tn_img1, rgb_var)

You should divide by the standard deviation, not the variance, to get unit variance in your inputs. So change your code to:
tn_img0 = tf.divide(tn_img0, tf.sqrt(rgb_var))
tn_img1 = tf.divide(tn_img1, tf.sqrt(rgb_var))
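To verify the normalization with numpy, the per-channel variance of the result should come out close to 1. A minimal check, assuming a single float image of shape (H, W, 3) (the random array here is only illustrative):
import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)  # illustrative image

mean = img.mean(axis=(0, 1))  # per-channel mean over height and width
std = img.std(axis=(0, 1))    # per-channel standard deviation

normalized = (img - mean) / std  # divide by std, not variance

print(normalized.mean(axis=(0, 1)))  # approximately [0, 0, 0]
print(normalized.var(axis=(0, 1)))   # approximately [1, 1, 1]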

Related

Why does all my emission mu of HMM in pyro converge to the same number?

I'm trying to create a Gaussian HMM model in pyro to infer the parameters of a very simple Markov sequence. However, my model fails to infer the parameters, and something weird happens during the training process. Using the same sequence, hmmlearn successfully infers the true parameters.
Full code can be accessed in here:
https://colab.research.google.com/drive/1u_4J-dg9Y1CDLwByJ6FL4oMWMFUVnVNd#scrollTo=ZJ4PzdTUBgJi
My model is modified from the example in here:
https://github.com/pyro-ppl/pyro/blob/dev/examples/hmm.py
I manually created a first-order Markov sequence with 3 states; the true means are [-10, 0, 10] and the sigmas are [1, 2, 1].
Here is my model:
def model(observations, num_state):
    assert not torch._C._get_tracing_state()
    with poutine.mask(mask=True):
        p_transition = pyro.sample("p_transition",
                                   dist.Dirichlet((1 / num_state) * torch.ones(num_state, num_state)).to_event(1))
        p_init = pyro.sample("p_init",
                             dist.Dirichlet((1 / num_state) * torch.ones(num_state)))
        p_mu = pyro.param(name="p_mu",
                          init_tensor=torch.randn(num_state),
                          constraint=constraints.real)
        p_tau = pyro.param(name="p_tau",
                           init_tensor=torch.ones(num_state),
                           constraint=constraints.positive)
        current_state = pyro.sample("x_0",
                                    dist.Categorical(p_init),
                                    infer={"enumerate": "parallel"})
        for t in pyro.markov(range(1, len(observations))):
            current_state = pyro.sample("x_{}".format(t),
                                        dist.Categorical(Vindex(p_transition)[current_state, :]),
                                        infer={"enumerate": "parallel"})
            pyro.sample("y_{}".format(t),
                        dist.Normal(Vindex(p_mu)[current_state], Vindex(p_tau)[current_state]),
                        obs=observations[t])
My training is set up as follows:
device = torch.device("cuda:0")
obs = torch.tensor(obs)
obs = obs.to(device)
torch.set_default_tensor_type("torch.cuda.FloatTensor")
guide = AutoDelta(poutine.block(model, expose_fn = lambda msg : msg["name"].startswith("p_")))
Elbo = Trace_ELBO
elbo = Elbo(max_plate_nesting = 1)
optim = Adam({"lr": 0.001})
svi = SVI(model, guide, optim, elbo)
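For reference, the training loop itself is not shown above; a typical SVI loop for this setup would look like the following sketch (num_steps is illustrative, and num_state = 3 matches the generated sequence):
# Hypothetical training loop; num_steps is illustrative, not from the original code
num_state = 3
num_steps = 5000
for step in range(num_steps):
    loss = svi.step(obs, num_state)  # args are forwarded to model and guide
    if step % 500 == 0:
        print("step {}: ELBO loss = {}".format(step, loss))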
As training goes on, the ELBO decreases steadily, yet the three state means converge to the same value.
I have tried putting the for loop of my model into a pyro.plate, and switching pyro.param to pyro.sample and vice versa, but nothing worked for my model.
I have not tried this model, but I think it should be possible to solve the problem by modifying the model in the following way:
def model(observations, num_state):
    assert not torch._C._get_tracing_state()
    with poutine.mask(mask=True):
        p_transition = pyro.sample("p_transition",
                                   dist.Dirichlet((1 / num_state) * torch.ones(num_state, num_state)).to_event(1))
        p_init = pyro.sample("p_init",
                             dist.Dirichlet((1 / num_state) * torch.ones(num_state)))
        p_mu = pyro.sample("p_mu",
                           dist.Normal(torch.zeros(num_state), torch.ones(num_state)).to_event(1))
        # HalfCauchy requires a positive scale, so use ones rather than zeros
        p_tau = pyro.sample("p_tau",
                            dist.HalfCauchy(torch.ones(num_state)).to_event(1))
        current_state = pyro.sample("x_0",
                                    dist.Categorical(p_init),
                                    infer={"enumerate": "parallel"})
        for t in pyro.markov(range(1, len(observations))):
            current_state = pyro.sample("x_{}".format(t),
                                        dist.Categorical(Vindex(p_transition)[current_state, :]),
                                        infer={"enumerate": "parallel"})
            pyro.sample("y_{}".format(t),
                        dist.Normal(Vindex(p_mu)[current_state], Vindex(p_tau)[current_state]),
                        obs=observations[t])
The model would then be trained using MCMC:
# MCMC
hmc_kernel = NUTS(model, target_accept_prob = 0.9, max_tree_depth = 7)
mcmc = MCMC(hmc_kernel, num_samples = 1000, warmup_steps = 100, num_chains = 1)
mcmc.run(obs)
The results could then be analysed using:
mcmc.get_samples()
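As a minimal sketch of summarizing the posterior draws (the dictionary keys match the pyro.sample site names used in the model above):
samples = mcmc.get_samples()  # dict mapping site names to stacked draws

# Posterior means of the emission parameters
print(samples["p_mu"].mean(dim=0))   # estimated state means
print(samples["p_tau"].mean(dim=0))  # estimated state scales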

GAN changing input size causes error

The code below only takes 32*32 input, but I want to feed in 128*128 images. How do I go about it? The code is from this tutorial: https://github.com/awjuliani/TF-Tutorials/blob/master/DCGAN.ipynb
def generator(z):
    zP = slim.fully_connected(z, 4*4*256, normalizer_fn=slim.batch_norm,
                              activation_fn=tf.nn.relu, scope='g_project',
                              weights_initializer=initializer)
    zCon = tf.reshape(zP, [-1, 4, 4, 256])
    gen1 = slim.convolution2d_transpose(
        zCon, num_outputs=64, kernel_size=[5, 5], stride=[2, 2],
        padding="SAME", normalizer_fn=slim.batch_norm,
        activation_fn=tf.nn.relu, scope='g_conv1', weights_initializer=initializer)
    gen2 = slim.convolution2d_transpose(
        gen1, num_outputs=32, kernel_size=[5, 5], stride=[2, 2],
        padding="SAME", normalizer_fn=slim.batch_norm,
        activation_fn=tf.nn.relu, scope='g_conv2', weights_initializer=initializer)
    gen3 = slim.convolution2d_transpose(
        gen2, num_outputs=16, kernel_size=[5, 5], stride=[2, 2],
        padding="SAME", normalizer_fn=slim.batch_norm,
        activation_fn=tf.nn.relu, scope='g_conv3', weights_initializer=initializer)
    g_out = slim.convolution2d_transpose(
        gen3, num_outputs=1, kernel_size=[32, 32], padding="SAME",
        biases_initializer=None, activation_fn=tf.nn.tanh,
        scope='g_out', weights_initializer=initializer)
    return g_out

def discriminator(bottom, reuse=False):
    dis1 = slim.convolution2d(bottom, 16, [4, 4], stride=[2, 2], padding="SAME",
                              biases_initializer=None, activation_fn=lrelu,
                              reuse=reuse, scope='d_conv1', weights_initializer=initializer)
    dis2 = slim.convolution2d(dis1, 32, [4, 4], stride=[2, 2], padding="SAME",
                              normalizer_fn=slim.batch_norm, activation_fn=lrelu,
                              reuse=reuse, scope='d_conv2', weights_initializer=initializer)
    dis3 = slim.convolution2d(dis2, 64, [4, 4], stride=[2, 2], padding="SAME",
                              normalizer_fn=slim.batch_norm, activation_fn=lrelu,
                              reuse=reuse, scope='d_conv3', weights_initializer=initializer)
    d_out = slim.fully_connected(slim.flatten(dis3), 1, activation_fn=tf.nn.sigmoid,
                                 reuse=reuse, scope='d_out', weights_initializer=initializer)
    return d_out
Below is the error I get when I feed in 128*128 images:
Trying to share variable d_out/weights, but specified shape (1024, 1) and found shape (16384, 1).
The generator produces 32*32 images, so feeding any other size into the discriminator results in this error.
The solution is to make the generator produce 128*128 images, either by:
1. adding more layers (2 in this case; see the sketch after the snippet below), or
2. changing the input to the generator.
Each stride-2 transposed convolution doubles the spatial size, so projecting to 16*16 instead of 4*4 makes the three existing layers go 16 -> 32 -> 64 -> 128:
zP = slim.fully_connected(z, 16*16*256, normalizer_fn=slim.batch_norm,
                          activation_fn=tf.nn.relu, scope='g_project',
                          weights_initializer=initializer)
zCon = tf.reshape(zP, [-1, 16, 16, 256])
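Alternatively (option 1), keep the original 4*4 projection and append two extra stride-2 transposed convolutions before g_out, giving 4 -> 8 -> 16 -> 32 -> 64 -> 128. A rough sketch using the same slim calls as above (the scopes and channel counts here are illustrative, not from the tutorial):
# gen3 is 32x32 when starting from the original 4x4 projection;
# two extra stride-2 layers bring it to 64x64 and then 128x128.
gen4 = slim.convolution2d_transpose(
    gen3, num_outputs=8, kernel_size=[5, 5], stride=[2, 2],
    padding="SAME", normalizer_fn=slim.batch_norm,
    activation_fn=tf.nn.relu, scope='g_conv4', weights_initializer=initializer)
gen5 = slim.convolution2d_transpose(
    gen4, num_outputs=4, kernel_size=[5, 5], stride=[2, 2],
    padding="SAME", normalizer_fn=slim.batch_norm,
    activation_fn=tf.nn.relu, scope='g_conv5', weights_initializer=initializer)
g_out = slim.convolution2d_transpose(
    gen5, num_outputs=1, kernel_size=[32, 32], padding="SAME",
    biases_initializer=None, activation_fn=tf.nn.tanh,
    scope='g_out', weights_initializer=initializer)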

Sklearn SVR shows worse results after scaling

The following code works quite well without scaling, but when scaling is applied the results are far from the actual values. Here is the code:
import numpy as np
from sklearn.svm import SVR

data = (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63)
model = SVR(kernel='poly', C=1e3, degree=3)
data_min = min(data)
data_max = max(data)
diff = data_max - data_min
data_scaled = []
for i in range(0, len(data)):
    data_scaled.append((data[i] - data_min) / diff)
data_scaled = np.matrix(data_scaled)
data_scaled = data_scaled.reshape(-1, 1)
y = (1,8,27,64,125,216,343,512,729,1000,1331,1728,2197,2744,3375,4096,4913,5832,6859,8000,9261,10648,12167,13824,15625,17576,19683,21952,24389,27000,29791,32768,35937,39304,42875,46656,50653,54872,59319,64000,68921,74088,79507,85184,91125,97336,103823,110592,117649,125000,132651,140608,148877,157464,166375,175616,185193,195112,205379,216000,226981,238328,250047)
model.fit(data_scaled, y)
predicted = model.predict(data_scaled)
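For reference, the manual min-max loop above does the same thing as sklearn's MinMaxScaler; a minimal equivalent sketch:
import numpy as np
from sklearn.preprocessing import MinMaxScaler

data = np.arange(1, 64).reshape(-1, 1)            # 1..63 as a column vector
data_scaled = MinMaxScaler().fit_transform(data)  # maps values to [0, 1]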

How to Display a decomposition wavelet in 3 level?

I want to display a 3-level wavelet decomposition.
Can anyone give me a Matlab function to display it?
[cA cH cV cD]=dwt2(a,waveletname);
out=[cA cH;cV cD];
figure;imshow(out,[]);
That only works for the first level.
Actually, I want a square-mode representation like the one in wavemenu in Matlab (see the example of the decomposition view).
I am fairly new to this.
Thanks.
You should use the function wavedec2(Image, numberOfLevels, 'wname') with the number of levels that you need.
For more information, see
http://www.mathworks.com/help/wavelet/ref/wavedec2.html
Example code with db1:
clear all
im = imread('cameraman.tif');
% Three-level wavelet decomposition
[c,s] = wavedec2(im,3,'db1');
% Approximation and detail coefficients at each level
A1 = appcoef2(c,s,'db1',1);
[H1,V1,D1] = detcoef2('all',c,s,1);
A2 = appcoef2(c,s,'db1',2);
[H2,V2,D2] = detcoef2('all',c,s,2);
A3 = appcoef2(c,s,'db1',3);
[H3,V3,D3] = detcoef2('all',c,s,3);
% Rescale each coefficient matrix to the 0-255 display range
V1img = wcodemat(V1,255,'mat',1);
H1img = wcodemat(H1,255,'mat',1);
D1img = wcodemat(D1,255,'mat',1);
A1img = wcodemat(A1,255,'mat',1);
V2img = wcodemat(V2,255,'mat',1);
H2img = wcodemat(H2,255,'mat',1);
D2img = wcodemat(D2,255,'mat',1);
A2img = wcodemat(A2,255,'mat',1);
V3img = wcodemat(V3,255,'mat',1);
H3img = wcodemat(H3,255,'mat',1);
D3img = wcodemat(D3,255,'mat',1);
A3img = wcodemat(A3,255,'mat',1);
% Assemble the square-mode view, innermost level first
mat3 = [A3img,V3img;H3img,D3img];
mat2 = [mat3,V2img;H2img,D2img];
mat1 = [mat2,V1img;H1img,D1img];
imshow(uint8(mat1))
(Figure: the final result, the assembled square-mode decomposition image.)

kNN Consistently Overusing One Label

I am using a kNN to classify labeled images. After classification, I output a confusion matrix. I noticed that one label, bottle, was being applied incorrectly more often than the others.
I removed the label and tested again, but then noticed that another label, shoe, was being applied incorrectly, though it had been fine the previous time.
There should be no normalization, so I'm unsure what is causing this behavior. The problem persisted no matter how many labels I removed.
Not totally sure how much code to post, so I'll put some things that should be relevant and pastebin the rest.
def confusionMatrix(classifier, train_DS_X, train_DS_y, test_DS_X, test_DS_y):
    # Will output a confusion matrix graph for the prediction
    y_pred = classifier.fit(train_DS_X, train_DS_y).predict(test_DS_X)
    labels = set(set(train_DS_y) | set(test_DS_y))

    def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(labels))
        plt.xticks(tick_marks, labels, rotation=45)
        plt.yticks(tick_marks, labels)
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')

    # Compute confusion matrix
    cm = confusion_matrix(test_DS_y, y_pred)
    np.set_printoptions(precision=2)
    print('Confusion matrix, without normalization')
    #print(cm)
    plt.figure()
    plot_confusion_matrix(cm)

    # Normalize the confusion matrix by row (i.e. by the number of samples
    # in each class)
    cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    print('Normalized confusion matrix')
    #print(cm_normalized)
    plt.figure()
    plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
    plt.show()
Relevant Code from Main Function:
# Select training and test data
PCA = decomposition.PCA(n_components=.95)
zscorer = ZScoreMapper(param_est=('targets', ['rest']), auto_train=False)
DS = getVoxels(1, .5)
train_DS = DS[0]
test_DS = DS[1]

# Apply PCA and ZScoring
train_DS = processVoxels(train_DS, True, zscorer, PCA)
test_DS = processVoxels(test_DS, False, zscorer, PCA)
print 3*"\n"

# Select the desired features
# If selecting samples or PCA, that must be the only feature
featuresOfInterest = ['pca']
trainDSFeat = selectFeatures(train_DS, featuresOfInterest)
testDSFeat = selectFeatures(test_DS, featuresOfInterest)
train_DS_X = trainDSFeat[0]
train_DS_y = trainDSFeat[1]
test_DS_X = testDSFeat[0]
test_DS_y = testDSFeat[1]

# Optimization of neighbors
# Naively searches for a local max starting at numNeighbors
lastScore = 0
lastNeighbors = 1
score = .0000001
numNeighbors = 5
while score > lastScore:
    lastScore = score
    lastNeighbors = numNeighbors
    numNeighbors += 1
    # Classification
    neigh = neighbors.KNeighborsClassifier(n_neighbors=numNeighbors, weights='distance')
    neigh.fit(train_DS_X, train_DS_y)
    # Testing
    score = neigh.score(test_DS_X, test_DS_y)

# Confusion Matrix Output
neigh = neighbors.KNeighborsClassifier(n_neighbors=lastNeighbors, weights='distance')
confusionMatrix(neigh, train_DS_X, train_DS_y, test_DS_X, test_DS_y)
Pastebin: http://pastebin.com/U7yTs3vs
The issue was in part the result of my axes being mislabeled: when I thought I was removing the faulty label, I was actually just removing a random label, so the faulty data was still being analyzed. Fixing the axes and removing the faulty label (which was actually rest) yielded correct results.
The code I changed is:
cm = confusion_matrix(test_DS_y, y_pred, labels=labels)
Basically, I manually set the ordering based on my list of ordered labels.
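A minimal sketch of that fix with a deterministic label ordering (sorted() here is illustrative; any fixed ordering works as long as the same list drives both the matrix and the plot ticks):
from sklearn.metrics import confusion_matrix

# Use an ordered list instead of an unordered set, and reuse it for the ticks
labels = sorted(set(train_DS_y) | set(test_DS_y))
cm = confusion_matrix(test_DS_y, y_pred, labels=labels)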
