My code (the drawing function):

def draw_hyper_plane(coef, intercept, y_max, y_min):
    points = np.array([[(-coef * y_min - intercept) / coef, y_min],
                       [(-coef * y_max - intercept) / coef, y_max]])
    plt.plot(points[:, 0], points[:, 1])
(Plots of the actual and desired output omitted.)

With my code I am not able to find the proper hyperplane that correctly classifies the points as in the desired output plot. Could anybody help me with this?
One way is to use the decision_function from the classifier and plot a level line (level=0 corresponds to your hyperplane). Here is some code.
import numpy as np
import matplotlib.pyplot as plt

def plot_2d_separator(classifier, X, fill=False, ax=None, eps=None):
    if eps is None:
        eps = X.std() / 2.
    x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
    y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
    xx = np.linspace(x_min, x_max, 100)
    yy = np.linspace(y_min, y_max, 100)
    X1, X2 = np.meshgrid(xx, yy)
    X_grid = np.c_[X1.ravel(), X2.ravel()]
    try:
        decision_values = classifier.decision_function(X_grid)
        levels = [0]
        fill_levels = [decision_values.min(), 0, decision_values.max()]
    except AttributeError:
        # no decision_function, fall back to predict_proba
        decision_values = classifier.predict_proba(X_grid)[:, 1]
        levels = [.5]
        fill_levels = [0, .5, 1]
    if ax is None:
        ax = plt.gca()
    if fill:
        ax.contourf(X1, X2, decision_values.reshape(X1.shape),
                    levels=fill_levels, colors=['tab:blue', 'tab:orange'],
                    alpha=0.5)
    else:
        ax.contour(X1, X2, decision_values.reshape(X1.shape), levels=levels,
                   colors="black")
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)
    ax.set_xticks(())
    ax.set_yticks(())
This code was adapted from the mglearn plotting helpers.
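For example, you can fit a linear classifier on toy data and draw its hyperplane with this helper (a sketch; make_blobs and LinearSVC here stand in for your own data and classifier):

from sklearn.datasets import make_blobs
from sklearn.svm import LinearSVC

# toy two-class data standing in for the question's points
X, y = make_blobs(n_samples=100, centers=2, random_state=0)
clf = LinearSVC().fit(X, y)

plt.scatter(X[:, 0], X[:, 1], c=y)
plot_2d_separator(clf, X)  # draws the level-0 line of the decision function
plt.show()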
I'm implementing a fully connected neural network for MNIST (not convolutional!) and I'm having a problem. After several forward and backward passes the exponents become abnormally large and Python is unable to compute them. It seems to me that I implemented backward_pass incorrectly. Could you help me with this? Here are the network settings:
w_1 = np.random.uniform(-0.5, 0.5, (128, 784))
b_1 = np.random.uniform(-0.5, 0.5, (128, 1))
w_2 = np.random.uniform(-0.5, 0.5, (10, 128))
b_2 = np.random.uniform(-0.5, 0.5, (10, 1))
X_train shape: (784, 31500)
y_train shape: (31500,)
X_test shape: (784, 10500)
y_test shape: (10500,)
def sigmoid(x, alpha):
    return 1 / (1 + np.exp(-alpha * x))

def dx_sigmoid(x, alpha):
    exp_neg_x = np.exp(-alpha * x)
    return alpha * exp_neg_x / ((1 + exp_neg_x)**2)

def ReLU(x):
    return np.maximum(0, x)

def dx_ReLU(x):
    return np.where(x > 0, 1, 0)

def one_hot(y):
    one_hot_y = np.zeros((y.size, y.max() + 1))
    one_hot_y[np.arange(y.size), y] = 1
    one_hot_y = one_hot_y.T
    return one_hot_y

def forward_pass(X, w_1, b_1, w_2, b_2):
    layer_1 = np.dot(w_1, X) + b_1
    layer_1_act = ReLU(layer_1)
    layer_2 = np.dot(w_2, layer_1_act) + b_2
    layer_2_act = sigmoid(layer_2, 0.01)
    return layer_1, layer_1_act, layer_2, layer_2_act

def backward_pass(layer_1, layer_1_act, layer_2, layer_2_act, X, y, w_2):
    one_hot_y = one_hot(y)
    n_samples = one_hot_y.shape[1]
    d_loss_by_layer_2_act = (2 / n_samples) * np.sum(one_hot_y - layer_2_act, axis=1).reshape(-1, 1)
    d_layer_2_act_by_layer_2 = dx_sigmoid(layer_2, 0.01)
    d_loss_by_layer_2 = d_loss_by_layer_2_act * d_layer_2_act_by_layer_2
    d_layer_2_by_w_2 = layer_1_act.T
    d_loss_by_w_2 = np.dot(d_loss_by_layer_2, d_layer_2_by_w_2)
    d_loss_by_b_2 = np.sum(d_loss_by_layer_2, axis=1).reshape(-1, 1)
    d_layer_2_by_layer_1_act = w_2.T
    d_loss_by_layer_1_act = np.dot(d_layer_2_by_layer_1_act, d_loss_by_layer_2)
    d_layer_1_act_by_layer_1 = dx_ReLU(layer_1)
    d_loss_by_layer_1 = d_loss_by_layer_1_act * d_layer_1_act_by_layer_1
    d_layer_1_by_w_1 = X.T
    d_loss_by_w_1 = np.dot(d_loss_by_layer_1, d_layer_1_by_w_1)
    d_loss_by_b_1 = np.sum(d_loss_by_layer_1, axis=1).reshape(-1, 1)
    return d_loss_by_w_1, d_loss_by_b_1, d_loss_by_w_2, d_loss_by_b_2
for epoch in range(epochs):
    layer_1, layer_1_act, layer_2, layer_2_act = forward_pass(X_train, w_1, b_1, w_2, b_2)
    d_loss_by_w_1, d_loss_by_b_1, d_loss_by_w_2, d_loss_by_b_2 = backward_pass(layer_1, layer_1_act,
                                                                               layer_2, layer_2_act,
                                                                               X_train, y_train,
                                                                               w_2)
    w_1 -= learning_rate * d_loss_by_w_1
    b_1 -= learning_rate * d_loss_by_b_1
    w_2 -= learning_rate * d_loss_by_w_2
    b_2 -= learning_rate * d_loss_by_b_2

    _, _, _, predictions = forward_pass(X_train, w_1, b_1, w_2, b_2)
    predictions = predictions.argmax(axis=0)
    accuracy = accuracy_score(predictions, y_train)
    print(f"epoch: {epoch} / accuracy: {accuracy}")
My loss is MSE: (1 / n_samples) * np.sum((one_hot_y - layer_2_act)**2, axis=0)
(Images of my hand-written gradient calculations omitted.)
I tried decreasing the learning rate, adding the alpha coefficient to the exponent (e^(-alpha * x) in the sigmoid), and dividing the entire sample by 255, but the program still cannot learn because the numbers get too large.
To start, the uniform initialization you are using has a relatively large std; for a linear layer the bound should be about 1/sqrt(fan). Taking the width of 128 used here:
1 / np.sqrt(128)
0.08838834764831843
which means:
w_1 = np.random.uniform(-0.08, 0.08, (128, 784))
...
I also did not check your forward and backward passes; assuming they are correct and you still see very large values in your activations, you could also normalize (e.g., with an implementation of batch norm or layer norm) to force them to be centered around zero with unit std.
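As an illustration of that suggestion, a minimal layer-norm-style sketch in the question's column-major NumPy layout (no learned scale or shift; the function name is mine):

def normalize_activations(z, eps=1e-5):
    # center each sample (column) at zero and scale to unit std
    mu = z.mean(axis=0, keepdims=True)
    sigma = z.std(axis=0, keepdims=True)
    return (z - mu) / (sigma + eps)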
P.S.: I also noticed you are doing multi-class classification, in which case MSE is not a good choice; use softmax or log-softmax (easier to implement) with a cross-entropy loss. That the loss is not moving fast enough could also be linked to a badly chosen learning rate. And are your inputs normalized? You could plot the distributions of the layers and see if they look good.
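For reference, a minimal sketch of the softmax-plus-cross-entropy suggestion, matching the question's layout (classes along axis 0, samples along axis 1); these helper names are mine:

def softmax(z):
    # subtract the per-sample max before exponentiating, for numerical stability
    z = z - z.max(axis=0, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=0, keepdims=True)

def cross_entropy(probs, one_hot_y):
    # mean negative log-likelihood over the batch
    return -np.mean(np.sum(one_hot_y * np.log(probs + 1e-12), axis=0))

# With this pairing, the gradient of the loss w.r.t. the output
# pre-activation simplifies to (probs - one_hot_y) / n_samples.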
I wrote an RNN with an LSTM cell in PyCharm. The peculiarity of this network is that the output of the RNN is fed into an integration operation, computed with Runge-Kutta.
The integration takes some input and propagates it one step ahead in time. To do so I need to slice the feature tensor X along the batch dimension and pass the slice to the Runge-Kutta routine.
class MyLSTM(torch.nn.Module):
    def __init__(self, ni, no, sampling_interval, nh=10, nlayers=1):
        super(MyLSTM, self).__init__()
        self.device = torch.device("cpu")
        self.dtype = torch.float
        self.ni = ni
        self.no = no
        self.nh = nh
        self.nlayers = nlayers
        self.lstms = torch.nn.ModuleList(
            [torch.nn.LSTMCell(self.ni, self.nh)] + [torch.nn.LSTMCell(self.nh, self.nh) for i in range(nlayers - 1)])
        self.out = torch.nn.Linear(self.nh, self.no)
        self.do = torch.nn.Dropout(p=0.2)
        self.actfn = torch.nn.Sigmoid()
        self.sampling_interval = sampling_interval
        self.scaler_states = None

    # Options
    # description of the whole block
    def forward(self, x, h0, train=False, integrate_ode=True):
        x0 = x.clone().requires_grad_(True)
        hs = x  # initiate hidden state
        if h0 is None:
            h = torch.zeros(hs.shape[0], self.nh, device=self.device)
            c = torch.zeros(hs.shape[0], self.nh, device=self.device)
        else:
            (h, c) = h0
        # LSTM cells
        for i in range(self.nlayers):
            h, c = self.lstms[i](hs, (h, c))
            if train:
                hs = self.do(h)
            else:
                hs = h
        # Output layer
        # y = self.actfn(self.out(hs))
        y = self.out(hs)
        if integrate_ode:
            p = y
            y = self.integrate(x0, p)
        return y, (h, c)

    def integrate(self, x0, p):
        # RK4 steps per interval
        M = 4
        DT = self.sampling_interval / M
        X = x0
        # X = self.scaler_features.inverse_transform(x0)
        for b in range(X.shape[0]):
            xx = X[b, :]
            for j in range(M):
                k1 = self.ode(xx, p[b, :])
                k2 = self.ode(xx + DT / 2 * k1, p[b, :])
                k3 = self.ode(xx + DT / 2 * k2, p[b, :])
                k4 = self.ode(xx + DT * k3, p[b, :])
                xx = xx + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
            X_all[b, :] = xx  # X_all is preallocated elsewhere (not shown)
        return X_all

    def ode(self, x0, y):
        # here I define the dynamic model (body omitted)
        pass
I get this error:
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor []], which is output 0 of SelectBackward, is at version 64; expected version 63 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
The problem is in the operations xx = X[b, :] and p[b, :]. I know that because when I choose a batch dimension of 1, I can replace those two expressions with xx = X and p, and then it works. How can I split the tensor without losing the gradient?
I had the same question, and after a lot of searching I added a .detach() call to h and c in the RNN cell.
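For illustration, a minimal sketch of what that looks like when the hidden state is carried across training steps (variable names follow the question's forward()):

# detach the hidden state before feeding it into the next step, so the
# autograd graph of previous steps is not retained (and cannot complain
# about in-place modifications made later)
y, (h, c) = model(x, h0)
h0 = (h.detach(), c.detach())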
I have a hundred entries in a csv file:
Physics,Maths,Status_class0or1
30,40,0
90,70,1
Using the above data I am trying to build a logistic (binary) classifier.
Please advise me where I am going wrong. Why am I getting the answer as a 3x3 matrix (9 values of theta, whereas it should be only 3)?
Here is the code:
# importing the libraries
import numpy as np
import pandas as pd
from sklearn import preprocessing

# reading data from the csv file
df = pd.read_csv("LogisticRegressionFirstBinaryClassifier.csv", header=None)
df.columns = ["Maths", "Physics", "AdmissionStatus"]
X = np.array(df[["Maths", "Physics"]])
y = np.array(df[["AdmissionStatus"]])
X = preprocessing.normalize(X)
X = np.c_[np.ones(X.shape[0]), X]
theta = np.ones((X.shape[1], 1))

print(X.shape)      # (100, 3)
print(y.shape)      # (100, 1)
print(theta.shape)  # (3, 1)
# calc_z to calculate the dot product of X and theta
def calc_z(X, theta):
    return np.dot(X, theta)

# sigmoid function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# cost function
def cost_function(X, y, theta):
    z = calc_z(X, theta)
    h = sigmoid(z)
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

print("cost_function =", cost_function(X, y, theta))

def derivativeofcostfunction(X, y, theta):
    z = calc_z(X, theta)
    h = sigmoid(z)
    calculation = np.dot((h - y).T, X)
    return calculation

print("derivativeofcostfunction=", derivativeofcostfunction(X, y, theta))
def grad_desc(X, y, theta, lr=.001, converge_change=.001):
    cost = cost_function(X, y, theta)
    change_cost = 1
    num_iter = 1
    while change_cost > converge_change:
        old_cost = cost
        print(theta)
        print(derivativeofcostfunction(X, y, theta))
        theta = theta - lr * derivativeofcostfunction(X, y, theta)
        cost = cost_function(X, y, theta)
        change_cost = old_cost - cost
        num_iter += 1
    return theta, num_iter
Here is the output:
[[ 0.4185146 -0.56877556 0.63999433]
[15.39722864 9.73995197 11.07882445]
[12.77277463 7.93485324 9.24909626]]
[[0.33944777 0.58199037 0.52493407]
[0.02106587 0.36300629 0.30297278]
[0.07040604 0.3969297 0.33737757]]
[[-0.05856159 -0.89826735 0.30849185]
[15.18035041 9.59004868 10.92827046]
[12.4804775 7.73302024 9.04599788]]
[[0.33950634 0.58288863 0.52462558]
[0.00588552 0.35341624 0.29204451]
[0.05792556 0.38919668 0.32833157]]
[[-5.17526527e-01 -1.21534937e+00 -1.03387571e-02]
[ 1.49729502e+01 9.44663458e+00 1.07843504e+01]
[ 1.21978140e+01 7.53778010e+00 8.84964495e+00]]
(array([[ 0.34002386, 0.58410398, 0.52463592],
[-0.00908743, 0.34396961, 0.28126016],
[ 0.04572775, 0.3816589 , 0.31948193]]), 46)
I changed this code: just adding a transpose to the returned matrix fixed my issue. The gradient np.dot((h - y).T, X) has shape (1, 3), so subtracting it from the (3, 1) theta broadcasts to a (3, 3) matrix; returning the transpose makes the shapes match.
def derivativeofcostfunction(X, y, theta):
    z = calc_z(X, theta)
    h = sigmoid(z)
    calculation = np.dot((h - y).T, X)
    return calculation.T
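A quick sanity check after the fix (a sketch, using the functions defined above): the gradient now matches theta's shape, so the update no longer broadcasts.

grad = derivativeofcostfunction(X, y, theta)
assert grad.shape == theta.shape == (3, 1)

theta, num_iter = grad_desc(X, y, theta)
print(theta.shape)  # (3, 1) instead of (3, 3)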
I have implemented a lambda function to resize an image from 28x28x1 to 224x224x3. I need to subtract the VGG mean from all the channels. When I try this, I get an error:
TypeError: 'Tensor' object does not support item assignment
def try_reshape_to_vgg(x):
    x = K.repeat_elements(x, 3, axis=3)
    x = K.resize_images(x, 8, 8, data_format="channels_last")
    x[:, :, :, 0] = x[:, :, :, 0] - 103.939
    x[:, :, :, 1] = x[:, :, :, 1] - 116.779
    x[:, :, :, 2] = x[:, :, :, 2] - 123.68
    return x[:, :, :, ::-1]
What's the recommended way to do element-wise subtraction of tensors?
You can use keras.applications.imagenet_utils.preprocess_input on tensors as of Keras 2.1.2. It subtracts the VGG mean from x under the default mode 'caffe'.
from keras.applications.imagenet_utils import preprocess_input

def try_reshape_to_vgg(x):
    x = K.repeat_elements(x, 3, axis=3)
    x = K.resize_images(x, 8, 8, data_format="channels_last")
    x = preprocess_input(x)
    return x
If you would like to stay on an older version of Keras, you can check how it is implemented in Keras 2.1.2 and extract the useful lines into try_reshape_to_vgg.
def _preprocess_symbolic_input(x, data_format, mode):
    global _IMAGENET_MEAN

    if mode == 'tf':
        x /= 127.5
        x -= 1.
        return x

    if data_format == 'channels_first':
        # 'RGB'->'BGR'
        if K.ndim(x) == 3:
            x = x[::-1, ...]
        else:
            x = x[:, ::-1, ...]
    else:
        # 'RGB'->'BGR'
        x = x[..., ::-1]

    if _IMAGENET_MEAN is None:
        _IMAGENET_MEAN = K.constant(-np.array([103.939, 116.779, 123.68]))

    # Zero-center by mean pixel
    if K.dtype(x) != K.dtype(_IMAGENET_MEAN):
        x = K.bias_add(x, K.cast(_IMAGENET_MEAN, K.dtype(x)), data_format)
    else:
        x = K.bias_add(x, _IMAGENET_MEAN, data_format)
    return x
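If you only want the element-wise subtraction without item assignment, one option (a sketch mirroring the original attempt, assuming channels_last and the same K backend import) is to build a constant mean tensor and let broadcasting do the work:

import numpy as np
from keras import backend as K

VGG_MEAN = K.constant(np.array([103.939, 116.779, 123.68]).reshape(1, 1, 1, 3))

def try_reshape_to_vgg(x):
    x = K.repeat_elements(x, 3, axis=3)
    x = K.resize_images(x, 8, 8, data_format="channels_last")
    x = x - VGG_MEAN            # broadcast over batch, height, and width
    return x[:, :, :, ::-1]     # channel reversal, as in the original attempt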
I understand how and why to use an ImageDataGenerator, but I am interested in casting an eyeball on how the ImageDataGenerator affects my images, so I can decide whether I have chosen a good amount of latitude in augmenting my data. I see that I can iterate over the images coming from the generator. I am looking for a way to tell whether a given image is an original or a modified one, and, if the latter, which parameters were modified in that particular instance. How can I see this?
Most of the transformations (except flipping) will always modify the input image. For example, if you've specified rotation_range, from the source code:
theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
it's unlikely that the random number will be exactly 0.
There's no convenient way to print out the amount of transformations applied to each image. You have to modify the source code and add some printing functions inside ImageDataGenerator.random_transform().
If you don't want to touch the source code (for example, on a shared machine), you can extend ImageDataGenerator and override random_transform().
import numpy as np
from keras.preprocessing.image import *

class MyImageDataGenerator(ImageDataGenerator):
    def random_transform(self, x, seed=None):
        # these lines are just copied-and-pasted from the original random_transform()
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        img_channel_axis = self.channel_axis - 1

        if seed is not None:
            np.random.seed(seed)

        if self.rotation_range:
            theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
        else:
            theta = 0

        if self.height_shift_range:
            tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_axis]
        else:
            tx = 0

        if self.width_shift_range:
            ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_axis]
        else:
            ty = 0

        if self.shear_range:
            shear = np.random.uniform(-self.shear_range, self.shear_range)
        else:
            shear = 0

        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)

        transform_matrix = None
        if theta != 0:
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                        [np.sin(theta), np.cos(theta), 0],
                                        [0, 0, 1]])
            transform_matrix = rotation_matrix

        if tx != 0 or ty != 0:
            shift_matrix = np.array([[1, 0, tx],
                                     [0, 1, ty],
                                     [0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)

        if shear != 0:
            shear_matrix = np.array([[1, -np.sin(shear), 0],
                                     [0, np.cos(shear), 0],
                                     [0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)

        if zx != 1 or zy != 1:
            zoom_matrix = np.array([[zx, 0, 0],
                                    [0, zy, 0],
                                    [0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)

        if transform_matrix is not None:
            h, w = x.shape[img_row_axis], x.shape[img_col_axis]
            transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
            x = apply_transform(x, transform_matrix, img_channel_axis,
                                fill_mode=self.fill_mode, cval=self.cval)

        if self.channel_shift_range != 0:
            x = random_channel_shift(x,
                                     self.channel_shift_range,
                                     img_channel_axis)

        if self.horizontal_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_col_axis)

        if self.vertical_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_row_axis)

        # print out the transformations applied to the image
        print('Rotation:', theta / np.pi * 180)
        print('Height shift:', tx / x.shape[img_row_axis])
        print('Width shift:', ty / x.shape[img_col_axis])
        print('Shear:', shear)
        print('Zooming:', zx, zy)

        return x
I just added 5 print statements at the end of the function. The other lines are copied and pasted from the original source code.
Now you can use it with, e.g.,
gen = MyImageDataGenerator(rotation_range=15,
                           width_shift_range=0.1,
                           height_shift_range=0.1,
                           zoom_range=0.5)
flow = gen.flow_from_directory('data', batch_size=1)
img = next(flow)
and see information like this printed on your terminal:
Rotation: -9.185074669096467
Height shift: 0.03791625365979884
Width shift: -0.08398553078553198
Shear: 0
Zooming: 1.40950509832 1.12895574928
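To actually eyeball the augmented images alongside those printed parameters, you can plot each batch as it comes out of the generator (a sketch, assuming the same 'data' directory as above):

import matplotlib.pyplot as plt

flow = gen.flow_from_directory('data', batch_size=1)
for _ in range(4):
    batch, labels = next(flow)  # triggers the parameter printout above
    plt.imshow(batch[0].astype('uint8'))
    plt.axis('off')
    plt.show()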