Unexpected result of convolution operation - machine-learning

Here is code I wrote to perform a single convolution and output the shape.
Using the formula from http://cs231n.github.io/convolutional-networks/ to calculate the output size:
You can convince yourself that the correct formula for calculating how
many neurons “fit” is given by (W−F+2P)/S+1
The formula for computing the output size has been implemented below as
def output_size(w, f, stride, padding):
    return (((w - f) + (2 * padding)) / stride) + 1
The issue is that output_size computes a size of 2690.5, which differs from the result of the convolution, which is 1350:
%reset -f
import torch
import torch.nn.functional as F
import numpy as np
from PIL import Image
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from pylab import plt
plt.style.use('seaborn')
%matplotlib inline
width = 60
height = 30
kernel_size_param = 5
stride_param = 2
padding_param = 2
img = Image.new('RGB', (width, height), color = 'red')
in_channels = 3
out_channels = 3
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(in_channels,
                      out_channels,
                      kernel_size=kernel_size_param,
                      stride=stride_param,
                      padding=padding_param))

    def forward(self, x):
        out = self.layer1(x)
        return out
# w : input volume size
# f : receptive field size of the Conv Layer neurons
# output_size computes spatial size of output volume - spatial dimensions are (width, height)
def output_size(w, f, stride, padding):
    return (((w - f) + (2 * padding)) / stride) + 1
w = width * height * in_channels
f = kernel_size_param * kernel_size_param
print('output size :' , output_size(w , f , stride_param , padding_param))
model = ConvNet()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=.001)
img_a = np.array(img)
img_pt = torch.tensor(img_a).float()
result = model(img_pt.view(3, width , height).unsqueeze_(0))
an = result.view(30 , 15 , out_channels).data.numpy()
# print(result.shape)
# print(an.shape)
# print(np.amin(an.flatten('F')))
print(30 * 15 * out_channels)
Have I implemented output_size correctly? How can I amend this model so that the result of Conv2d has the same shape as the result of output_size?

The problem is that your input image is not square, so you should apply the formula to the width and the height of the input image separately.
Also, you should not use the number of channels in the formula, because we explicitly define how many channels we want in the output.
Finally, f is the kernel_size, not kernel_size * kernel_size, in the formula.
w = width
h = height
f = kernel_size_param
output_w = int(output_size(w , f , stride_param , padding_param))
output_h = int(output_size(h , f , stride_param , padding_param))
print("Output_size", [out_channels, output_w, output_h]) #--> [1, 3, 30 ,15]
And then output size :
print("Output size", result.shape) #--> [1, 3, 30 ,15]
Formula source : http://cs231n.github.io/convolutional-networks/
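For completeness, here is a small self-contained sketch (using the same hyperparameters as above) that checks the per-dimension formula against the shape nn.Conv2d actually produces; PyTorch computes floor((W - F + 2P)/S) + 1 for each spatial dimension, which matches the int() truncation used above:

# Standalone shape check (assumes the same sizes used in the question).
import torch
import torch.nn as nn

def output_size(w, f, stride, padding):
    return (w - f + 2 * padding) // stride + 1   # floor division, as Conv2d does

width, height = 60, 30
conv = nn.Conv2d(3, 3, kernel_size=5, stride=2, padding=2)
x = torch.randn(1, 3, height, width)             # NCHW: batch, channels, H, W
print(conv(x).shape)                             # torch.Size([1, 3, 15, 30])
print(output_size(height, 5, 2, 2), output_size(width, 5, 2, 2))   # 15 30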

Related

Pytorch Batch Size issue when comparing outputs from model and labels

I'm having issues with the input and output size being halved from 16 to 8 when running through my model. I've tried tweaking the input/output size between the maxpool and linear layer, but that doesn't work. I was wondering if it has something to do with my loss criterion inputs, or if the model should be outputting 16 instead of 8.
import torch
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from os import listdir
import os
from os.path import isdir
from torchsummary import summary
# Define the preprocessing steps
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Define the custom dataset
class VideoDataset(Dataset):
    def __init__(self, data_dir, transform=None):
        self.data_dir = data_dir
        self.transform = transform

    def __len__(self):
        return len(self.data_dir)

    def __getitem__(self, idx):
        video_dir = self.data_dir[idx]
        print(video_dir)
        video = []
        for i in range(10):  # For example, each video has 10 frames
            img = Image.open(f"{video_dir}/frame_{i}.jpg")
            if self.transform:
                img = self.transform(img)
            video.append(img)
        video = torch.stack(video)
        if(video_dir.find("squat")):
            label = 1
        if(video_dir.find("pull")):
            label = 0
        else:
            label = 0
        # label = str(video_dir.split("/")[-2]) # Assume the class label is included in the video directory name
        sample = {'video': video, 'label': label}
        #print(sample)
        return sample
# Load the data
path = "videos/squat/"
path_pullups = "videos/pull ups/"
path_situp = "videos/situp/"
data_dir = list()
for file in os.scandir(path):
    if file.is_dir():
        data_dir.append(path + file.name)
for file in os.scandir(path_pullups):
    if file.is_dir():
        data_dir.append(path_pullups + file.name)
for file in os.scandir(path_situp):
    if file.is_dir():
        data_dir.append(path_situp + file.name)
print(len(data_dir)/2)
# Split the data into training and validation sets
train_data = VideoDataset(data_dir[:243], transform=transform) # Use first two classes for training
#print("train" + str(train_data.data_dir))
#valid_data = VideoDataset(data_dir[165:], transform=transform) # Use last class for validation
# Define the data loaders
train_loader = DataLoader(train_data, batch_size=16, shuffle=True)
#valid_loader = DataLoader(valid_data, batch_size=16, shuffle=False)
# Define the CNN model
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv3d(10, 16, kernel_size=(3, 3, 3), stride=1, padding=1)
        self.pool = torch.nn.MaxPool3d(kernel_size=(2, 2, 2), stride=2, padding=0)
        self.fc1 = torch.nn.Linear(16 * 8 * 8 * 8, 32) #16*16*2
        self.fc2 = torch.nn.Linear(32, 3)
        self.fc3 = torch.nn.Linear(3, 1)

    def forward(self, x):
        x = self.pool(torch.nn.functional.relu(self.conv1(x)))
        x = x.view(-1, 16 * 8 * 8 * 8)
        x = torch.nn.functional.relu(self.fc1(x))
        x = self.fc2(x)
        x = self.fc3(x)
        x = torch.sigmoid(x)
        return x
# Initialize the model, loss function, and optimizer
model = Net()
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Train the model
for epoch in range(10):  # Train for 10 epochs
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data['video'], data['label']
        # .view(-1,1)
        outputs = model(inputs)
        #if labels.shape[0] != outputs.shape[0]:
        #    labels = labels.view(-1, outputs.shape[0]).t()
        summary(model, (10, 3, 32, 32), device='cpu')
        print("Labels size:" + str(labels.shape))
        print("Outputs size:" + str(outputs.shape))
        print(outputs, labels)
        #####################################################################
        loss = criterion(outputs, labels) #### error here
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(f"Epoch {epoch + 1} loss: {running_loss / (i + 1)}")
# Evaluate the model
# correct = 0
# total = 0
# with torch.no_grad():
# for data in valid_loader:
# inputs, labels = data['video'], data['label']
# outputs = model(inputs)
# _, predicted = torch.max(outputs.data, 1)
# total += labels.size(0)
# correct += (predicted == labels).sum().item()
# print(f"Accuracy of the model on the validation set: {100 * correct / total}%")
Sample inputs are frames from video clips, described by their exercise category such as squats, situps, pullups, etc.
The desired output for this model is a binary value, 1 or 0, indicating whether each given exercise is a squat or not, as labeled in the custom dataset class.
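A likely explanation, sketched under the assumption that each batch from the loader has shape [16, 10, 3, 32, 32]: Conv3d(10, 16, kernel_size=3, padding=1) leaves the depth dimension at 3 and MaxPool3d(2) reduces it to 1, so each sample flattens to 16 * 1 * 16 * 16 = 4096 values, not 16 * 8 * 8 * 8 = 8192. x.view(-1, 8192) therefore packs two samples into each row and halves the batch from 16 to 8.

# Sketch of the shape arithmetic with dummy tensors (sizes assumed from the code above).
import torch

x = torch.randn(16, 10, 3, 32, 32)                       # a batch as produced by the DataLoader
conv = torch.nn.Conv3d(10, 16, kernel_size=3, stride=1, padding=1)
pool = torch.nn.MaxPool3d(kernel_size=2, stride=2)

h = pool(torch.relu(conv(x)))
print(h.shape)                                            # torch.Size([16, 16, 1, 16, 16])
print(h.view(-1, 16 * 8 * 8 * 8).shape)                   # torch.Size([8, 8192])  <- batch halved
print(h.view(h.size(0), -1).shape)                        # torch.Size([16, 4096]) <- batch preserved

With the second flattening, fc1 would need in_features=4096, and BCELoss would also want float labels of shape [batch, 1]; this is only a sketch, not a tested fix for the model above.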

Pytorch linear regression loss increase

I tried to implement a simple demo that fits a polynomial regression, but the linear model's loss fails to decrease.
I am confused about where I went wrong.
If I train the model on one sample at a time (batch size = 1), it works fine, but when I feed the model many samples at a time, the loss increases and goes to inf.
import numpy as np
import torch
import math
from matplotlib import pyplot as plt
def rand_series(size):
    x = np.linspace(-100, 100, size)
    np.random.shuffle(x)
    base_y = 20 * np.sin(2 * math.pi / 200 * x)
    y = base_y + 10 * np.random.rand(size)
    return x, y

def rescale_vec(vector):
    vec_as_tensor = torch.tensor(vector, dtype=torch.float32)
    max_in_vec = torch.max(vec_as_tensor)
    min_in_vec = torch.min(vec_as_tensor)
    if max_in_vec - min_in_vec == 0:
        return torch.ones(vec_as_tensor.size(), dtype=torch.float32)
    else:
        return (vec_as_tensor - min_in_vec) / (max_in_vec - min_in_vec)

def rescale(vectors):
    if len(vectors.shape) == 1:
        return rescale_vec(vectors)
    nor_vecs = torch.empty(vectors.shape)
    for i in range(vectors.shape[0]):
        nor_vecs[i] = rescale_vec(vectors[i])
    return nor_vecs

class LinearRegression(torch.nn.Module):
    def __init__(self, power=4):
        super().__init__()
        self.layer = torch.nn.Linear(power, 1)

    def forward(self, x):
        return self.layer(x)

def regression(x_, y_, learning_rate):
    x = torch.t(torch.tensor(x_, dtype=torch.float32))
    y = torch.tensor(y_, dtype=torch.float32)
    dim_size = x.size()[1]
    print(dim_size, x.size())
    model = LinearRegression(dim_size)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    loss_func = torch.nn.MSELoss(reduction='sum')
    batch_size = 400
    for round in range(50):
        sample_indices = torch.randint(0, len(x), (batch_size, ))
        x_samples = torch.index_select(x, 0, sample_indices)
        y_samples = torch.index_select(y, 0, sample_indices)
        optimizer.zero_grad()
        y_hat = model(x_samples.view(-1, dim_size))
        loss = loss_func(y_hat, y_samples)
        print(loss.item())
        loss.backward()
        optimizer.step()
    return model
x_one, y = rand_series(1000)
b = np.ones(len(x_one))
x = np.array([b, x_one, x_one ** 2, x_one ** 3, x_one ** 4, x_one ** 5])
model = regression(rescale(x), torch.tensor(y, dtype=torch.float32), 0.002)
nor_x = rescale(x)
y_hat = model(torch.t(torch.tensor(x, dtype=torch.float32)))
plt.scatter(x_one, y)
plt.scatter(x_one, y_hat.data, c='red')
plt.show()
the loss:
4.7375866968775066e+19
1.6979300048622735e+26
6.0214270068868396e+32
inf
inf
inf
You need to use loss_func = torch.nn.MSELoss(reduction='mean') to solve the divergence problem. A batch size of one or two seems to work because the summed loss is still small enough. With more training steps, you would see that your loss tends exponentially to infinity.
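As a rough illustration of why (random tensors rather than the original data): with reduction='sum' the gradient magnitude scales with the batch size, so at batch_size=400 the effective step size is roughly 400 times larger than with reduction='mean', and lr=0.002 overshoots until the loss diverges.

import torch

x = torch.randn(400, 6)
y = torch.randn(400, 1)
w = torch.zeros(6, 1, requires_grad=True)

for reduction in ('sum', 'mean'):
    loss = torch.nn.MSELoss(reduction=reduction)(x @ w, y)
    loss.backward()
    print(reduction, w.grad.abs().max().item())   # the 'sum' gradient is ~400x the 'mean' one
    w.grad = None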

Logistic Regression not able to find value of theta

I have a hundred entries in a CSV file.
Physics,Maths,Status_class0or1
30,40,0
90,70,1
Using the above data I am trying to build a logistic (binary) classifier.
Please advise me where I am going wrong. Why am I getting the answer as a 3*3 matrix (9 values of theta, whereas it should be only 3)?
Here is the code:
Importing the libraries:
import numpy as np
import pandas as pd
from sklearn import preprocessing
Reading data from the CSV file:
df = pd.read_csv("LogisticRegressionFirstBinaryClassifier.csv", header=None)
df.columns = ["Maths", "Physics", "AdmissionStatus"]
X = np.array(df[["Maths", "Physics"]])
y = np.array(df[["AdmissionStatus"]])
X = preprocessing.normalize(X)
X = np.c_[np.ones(X.shape[0]), X]
theta = np.ones((X.shape[1], 1))
print(X.shape) # (100, 3)
print(y.shape) # (100, 1)
print(theta.shape) # (3, 1)
calc_z to calculate the dot product of X and theta:
def calc_z(X, theta):
    return np.dot(X, theta)
Sigmoid function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
Cost_function
def cost_function(X, y, theta):
    z = calc_z(X, theta)
    h = sigmoid(z)
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
print("cost_function =" , cost_function(X, y, theta))
def derivativeofcostfunction(X, y, theta):
    z = calc_z(X, theta)
    h = sigmoid(z)
    calculation = np.dot((h - y).T, X)
    return calculation
print("derivativeofcostfunction=", derivativeofcostfunction(X, y, theta))
def grad_desc(X, y, theta, lr=.001, converge_change=.001):
    cost = cost_function(X, y, theta)
    change_cost = 1
    num_iter = 1
    while(change_cost > converge_change):
        old_cost = cost
        print(theta)
        print(derivativeofcostfunction(X, y, theta))
        theta = theta - lr * (derivativeofcostfunction(X, y, theta))
        cost = cost_function(X, y, theta)
        change_cost = old_cost - cost
        num_iter += 1
    return theta, num_iter
Here is the output :
[[ 0.4185146 -0.56877556 0.63999433]
[15.39722864 9.73995197 11.07882445]
[12.77277463 7.93485324 9.24909626]]
[[0.33944777 0.58199037 0.52493407]
[0.02106587 0.36300629 0.30297278]
[0.07040604 0.3969297 0.33737757]]
[[-0.05856159 -0.89826735 0.30849185]
[15.18035041 9.59004868 10.92827046]
[12.4804775 7.73302024 9.04599788]]
[[0.33950634 0.58288863 0.52462558]
[0.00588552 0.35341624 0.29204451]
[0.05792556 0.38919668 0.32833157]]
[[-5.17526527e-01 -1.21534937e+00 -1.03387571e-02]
[ 1.49729502e+01 9.44663458e+00 1.07843504e+01]
[ 1.21978140e+01 7.53778010e+00 8.84964495e+00]]
(array([[ 0.34002386, 0.58410398, 0.52463592],
[-0.00908743, 0.34396961, 0.28126016],
[ 0.04572775, 0.3816589 , 0.31948193]]), 46)
I changed this code, just adding a transpose when returning the matrix, and it fixed my issue.
def derivativeofcostfunction(X, y, theta):
    z = calc_z(X, theta)
    h = sigmoid(z)
    calculation = np.dot((h - y).T, X)
    return calculation.T
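For what it's worth, the 3*3 theta comes from NumPy broadcasting rather than from the math: np.dot((h - y).T, X) has shape (1, 3) while theta has shape (3, 1), so theta - lr * gradient broadcasts to a (3, 3) matrix. Returning the transpose makes the gradient (3, 1), and the update then keeps theta's shape. A minimal shape check with made-up arrays:

import numpy as np

X = np.ones((100, 3))
y = np.ones((100, 1))
theta = np.ones((3, 1))

grad = np.dot((X.dot(theta) - y).T, X)      # shape (1, 3)
print((theta - 0.001 * grad).shape)         # (3, 3) -- broadcasting inflates theta
print((theta - 0.001 * grad.T).shape)       # (3, 1) -- transposed gradient keeps the shape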

Keras Bidirectional layer using 4-dimensional data

I'm designing a Keras model for classification based on article data.
I have data with 4 dimensions as follows:
[batch, article_num, word_num, word embedding size]
and I want to feed each (word_num, word embedding) slice into a Keras Bidirectional layer
in order to get a result with 3 dimensions as follows:
[batch, article_num, bidirectional layer output size]
When I tried to feed the 4-dimensional data for testing like this:
inp = Input(shape=(article_num, word_num, ))
# dims = [batch, article_num, word_num]
x = Reshape((article_num * word_num, ), input_shape = (article_num, word_num))(inp)
# dims = [batch, article_num * word_num]
x = Embedding(word_num, word_embedding_size, input_length = article_num * word_num)(x)
# dims = [batch, article_num * word_num, word_embedding_size]
x = Reshape((article_num , word_num, word_embedding_size),
input_shape = (article_num * word_num, word_embedding_size))(x)
# dims = [batch, article_num, word_num, word_embedding_size]
x = Bidirectional(CuDNNLSTM(50, return_sequences = True),
input_shape=(article_num , word_num, word_embedding_size))(x)
and I got the error:
ValueError: Input 0 is incompatible with layer bidirectional_12: expected ndim=3, found ndim=4
How can I achieve this?
If you don't want it to touch the article_num dimension, you can try using a TimeDistributed wrapper. But I'm not certain that it will be compatible with bidirectional and other stuff.
inp = Input(shape=(article_num, word_num))
x = TimeDistributed(Embedding(word_num, word_embedding_size))(inp)
#option 1
#x1 shape : (batch, article_num, word_num, 100)
x1 = TimeDistributed(Bidirectional(CuDNNLSTM(50, return_sequences = True)))(x)
#option 2
#x2 shape : (batch, article_num, 100)
x2 = TimeDistributed(Bidirectional(CuDNNLSTM(50)))(x)
Hints:
Don't use input_shape everywhere, you only need it at the Input tensor.
You probably don't need any of the reshapes if you also use a TimeDistributed in the embedding.
If you don't want word_num in the resulting dimensions, use return_sequences=False.
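A minimal shape-check sketch of the second option, with made-up sizes, a plain LSTM in place of CuDNNLSTM (which needs a GPU build), and the Embedding applied directly to the 3-D integer input (Keras embeddings accept higher-rank index tensors), so only the recurrent layer is wrapped in TimeDistributed:

from tensorflow.keras.layers import Input, Embedding, TimeDistributed, Bidirectional, LSTM
from tensorflow.keras.models import Model

article_num, word_num, word_embedding_size = 4, 20, 16      # illustrative sizes

inp = Input(shape=(article_num, word_num))                  # (batch, article_num, word_num)
emb = Embedding(word_num, word_embedding_size)(inp)         # (batch, article_num, word_num, 16)
out = TimeDistributed(Bidirectional(LSTM(50)))(emb)         # (batch, article_num, 100)

Model(inp, out).summary()

The last dimension is 100 rather than 50 because Bidirectional concatenates the forward and backward states by default.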

A Simple Network on TensorFlow

I was trying to train a very simple model on TensorFlow. The model takes a single float as input and returns the probability of the input being greater than 0. I used 1 hidden layer with 10 hidden units. The full code is shown below:
import tensorflow as tf
import random
# Graph construction
x = tf.placeholder(tf.float32, shape = [None,1])
y_ = tf.placeholder(tf.float32, shape = [None,1])
W = tf.Variable(tf.random_uniform([1,10],0.,0.1))
b = tf.Variable(tf.random_uniform([10],0.,0.1))
layer1 = tf.nn.sigmoid( tf.add(tf.matmul(x,W), b) )
W1 = tf.Variable(tf.random_uniform([10,1],0.,0.1))
b1 = tf.Variable(tf.random_uniform([1],0.,0.1))
y = tf.nn.sigmoid( tf.add( tf.matmul(layer1,W1),b1) )
loss = tf.square(y - y_)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# Training
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    N = 1000
    while N != 0:
        batch = ([],[])
        u = random.uniform(-10.0,+10.0)
        if u >= 0.:
            batch[0].append([u])
            batch[1].append([1.0])
        if u < 0.:
            batch[0].append([u])
            batch[1].append([0.0])
        sess.run(train_step, feed_dict = {x : batch[0] , y_ : batch[1]} )
        N -= 1
    while(True):
        u = raw_input("Give an x\n")
        print sess.run(y, feed_dict = {x : [[u]]})
The problem is that I am getting terribly unrelated results. The model does not learn anything and returns irrelevant probabilities. I tried to adjust the learning rate and change the variable initialization, but I did not get anything useful. Do you have any suggestions?
You are computing only one probability; what you want is to have two classes:
greater than or equal to zero,
less than zero.
So the output of the network will be a tensor with two entries per example that will contain the probabilities of each class. I renamed y_ in your example to labels:
labels = tf.placeholder(tf.float32, shape = [None,2])
Next we compute the cross entropy between the result of the network and the expected classification. The classes for positive numbers would be [1.0, 0] and for negative numbers would be [0.0, 1.0].
The loss function becomes:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels)
loss = tf.reduce_mean(cross_entropy)
I renamed the y to logits as that is a more descriptive name.
Training this network for 10000 steps gives:
Give an x
3.0
[[ 0.96353203 0.03686807]]
Give an x
200
[[ 0.97816485 0.02264325]]
Give an x
-20
[[ 0.12095013 0.87537241]]
Full code:
import tensorflow as tf
import random
# Graph construction
x = tf.placeholder(tf.float32, shape = [None,1])
labels = tf.placeholder(tf.float32, shape = [None,2])
W = tf.Variable(tf.random_uniform([1,10],0.,0.1))
b = tf.Variable(tf.random_uniform([10],0.,0.1))
layer1 = tf.nn.sigmoid( tf.add(tf.matmul(x,W), b) )
W1 = tf.Variable(tf.random_uniform([10, 2],0.,0.1))
b1 = tf.Variable(tf.random_uniform([1],0.,0.1))
logits = tf.nn.sigmoid( tf.add( tf.matmul(layer1,W1),b1) )
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels)
loss = tf.reduce_mean(cross_entropy)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# Training
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    N = 1000
    while N != 0:
        batch = ([],[])
        u = random.uniform(-10.0,+10.0)
        if u >= 0.:
            batch[0].append([u])
            batch[1].append([1.0, 0.0])
        if u < 0.:
            batch[0].append([u])
            batch[1].append([0.0, 1.0])
        sess.run(train_step, feed_dict = {x : batch[0] , labels : batch[1]} )
        N -= 1
    while(True):
        u = raw_input("Give an x\n")
        print sess.run(logits, feed_dict = {x : [[u]]})
