cvxpy infeasible error with different number of samples - cvxpy

Does anyone know why cvxpy throws an infeasible error when I change the number of samples in constrained OLS? I'm attaching code to re-create my issue. n=100000 is fine, but n=400000 fails.
import cvxpy as cp
import numpy as np

class constrained_ols:
    def __init__(self, xdim=6):
        self.xdim = xdim
        return None

    def fit(self, x, y):
        import cvxpy as cp
        w = cp.Variable(self.xdim)
        i = cp.Variable()
        quad_prog = cp.Minimize(cp.sum_squares(y - (x @ w + i)))
        cons = [w >= 0, cp.sum(w) <= 1.02, cp.sum(w) >= .98]
        problem = cp.Problem(quad_prog, cons)
        problem.solve()
        print(problem.status)
        self.coef_ = w.value
        self.intercept_ = i.value

    def predict(self, x):
        return x @ self.coef_

n = 100000
x = np.random.normal(0, 1, (n, 10))
y = np.random.normal(0, 1, n)
model = constrained_ols(xdim=10)
model.fit(x, y)
model.coef_, model.intercept_
I was expecting to get a vector of 10 coefficients and an intercept, but instead I got None values.
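In case it helps anyone reproducing this, here is a minimal sketch of what I would try first, assuming the model itself is well posed and the failure is numerical in the default QP solver. It reuses quad_prog and cons from the code above; OSQP, SCS, and the status constants are standard CVXPY names, but which solvers are actually installed depends on your environment.

# Sketch: turn on solver logging and fall back to another solver if the
# default one declares the problem infeasible (solver availability varies).
problem = cp.Problem(quad_prog, cons)
try:
    problem.solve(solver=cp.OSQP, verbose=True)
except cp.error.SolverError:
    pass
if problem.status not in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE):
    problem.solve(solver=cp.SCS, verbose=True)
print(problem.status, problem.value)

Scaling the objective, e.g. cp.Minimize(cp.sum_squares(y - (x @ w + i)) / n), may also help the solver's conditioning at large n, though I have not verified that it fixes this particular case.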

Related

How to deal with big dataset when using pyG?

I am a beginner learning to use torch_geometric to build my GNN models. I followed the PyG node-classification example and built my own dataset; however, when I try to run the code on my GPU it tells me it ran out of memory. Maybe my dataset is too large to fit in GPU memory? I don't know. I share a machine with 8 A100s with my classmates. Could you please give me some suggestions? Thank you!
from torch_geometric.nn import GATConv,GCNConv
from torch_geometric.data import Dataset,DataLoader,HeteroData,Data
import torch.nn as nn
from torch_geometric.nn import DataParallel
from torch_geometric.loader import DataListLoader
import torch.nn.functional as F
import torch
import pandas as pd
from transformers import BertTokenizer,BertModel
import pickle
import time
from tqdm import tqdm
from numba import jit
import json
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
plt.grid(True)
plt.grid(color='gray',
linestyle='--',
linewidth=1,
alpha=0.3)
begin = time.time()
punctuation = "!#$%&'\(\)-*+,-./:;<=>?#\\\[\]^_`{|}~():;,。【】·、“”‘’《》\"%……——·"
def dataCleanifier(s):
    for i in punctuation:
        s = s.replace(i, " ")  # assign back; str.replace does not modify in place
    s = s.replace(" ", " ")
    s = s.replace("\n", "")
    return s
class BertClassifier(nn.Module):
    def __init__(self, bertType: str, max_length, tag_size):
        super(BertClassifier, self).__init__()
        self.bertType = bertType
        self.tokenizer = BertTokenizer.from_pretrained(self.bertType)
        self.encoder = BertModel.from_pretrained(self.bertType)
        self.outputDim = self.encoder.pooler.dense.out_features
        self.max_length = max_length
        self.tag_size = tag_size
        self.dropout = nn.Dropout(0.1)
        self.activation = nn.LeakyReLU(0.1)
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, 512, (k, self.outputDim)) for k in (2, 3, 4)])
        self.fc_cnn = nn.Linear(512 * len((2, 3, 4)), self.tag_size)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x

    def forward(self, x):
        x = self.tokenizer.batch_encode_plus(x, return_tensors="pt", max_length=self.max_length, truncation=True, padding="max_length")
        attention = x["attention_mask"]
        x = x["input_ids"]
        x = x.cuda(2)
        x = self.encoder(x, attention_mask=attention.cuda(2))['last_hidden_state'][:]
        x = x.unsqueeze(1)
        encoded = torch.cat([self.conv_and_pool(x, conv) for conv in self.convs], 1)
        x = self.fc_cnn(encoded)
        x = self.activation(x)
        # x = F.softmax(x, dim=1)
        return x, encoded
class ContrastiveLoss(nn.Module):
    def __init__(self):
        super(ContrastiveLoss, self).__init__()

    def forward(self, representations, label, y_hat):
        n = label.shape[0]
        T = 0.5
        similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=2)
        mask = torch.ones_like(similarity_matrix) * (label.expand(n, n).eq(label.expand(n, n).t()))
        mask_no_sim = torch.ones_like(mask) - mask
        mask_dui_jiao_0 = torch.ones(n, n) - torch.eye(n, n)
        similarity_matrix = torch.exp(similarity_matrix / T)
        similarity_matrix = similarity_matrix * mask_dui_jiao_0
        sim = mask * similarity_matrix
        no_sim = similarity_matrix - sim
        no_sim_sum = torch.sum(no_sim, dim=1)
        no_sim_sum_expend = no_sim_sum.repeat(n, 1).T
        sim_sum = sim + no_sim_sum_expend
        loss = torch.div(sim, sim_sum)
        loss = mask_no_sim + loss + torch.eye(n, n)
        # next, compute the loss for this batch
        loss = -torch.log(loss)  # take -log
        loss = torch.sum(torch.sum(loss, dim=1)) / (2 * n) + nn.CrossEntropyLoss()(y_hat, label)
        return loss
class GAT(nn.Module):
    def __init__(self, hidden_channels) -> None:
        super().__init__()
        self.conv1 = GATConv(data.num_features, hidden_channels)
        self.conv2 = GATConv(hidden_channels, 9)
        self.activation = nn.ReLU()

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = self.activation(x)
        # print(x)
        # x = F.dropout(x, p=0.2)
        x = self.conv2(x, edge_index)
        return x
x=None
y=None
edge_index = None
train_mask = None
with open("X.pkl","rb") as f1:
x = pickle.load(f1)
with open("Y.pkl","rb") as f2:
y = pickle.load(f2)
y = y.long()
with open("edge_index.pkl","rb") as f3:
edge_index = pickle.load(f3)
# print(edge_index.shape)
with open("train_mask.pkl","rb") as f4:
train_mask = pickle.load(f4)
data = Data(x=x,y=y,edge_index=edge_index)
data.train_mask = train_mask
model = GAT(hidden_channels=32)
model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
scheduler = lr_scheduler.StepLR(optimizer, 100, 0.8)
criterion = ContrastiveLoss()
def train():
    model.train()
    optimizer.zero_grad()  # Clear gradients.
    out = model(data.x, data.edge_index)  # Perform a single forward pass.
    loss = criterion(data.x[data.train_mask], data.y[data.train_mask], out[data.train_mask])  # Compute the loss solely based on the training nodes.
    loss.backward()  # Derive gradients.
    optimizer.step()  # Update parameters based on gradients.
    return loss

def test():
    model.eval()
    out = model(data.x, data.edge_index)
    pred = out.argmax(dim=1)  # Use the class with highest probability.
    test_correct = pred[data.train_mask] == data.y[data.train_mask]  # Check against ground-truth labels.
    test_acc = int(test_correct.sum()) / int(data.train_mask.sum())  # Derive ratio of correct predictions.
    return test_acc

accs = []
for epoch in range(1, 1025):
    loss = train()
    print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}', end=" ")
    acc = test()
    print("acc:", acc)
    accs.append(acc)
    scheduler.step()
plt.plot(range(len(accs)),accs)
print(time.time()-begin)
with open("./accs_gat_GCL.pkl","wb") as f1:
pickle.dump(accs,f1)
plt.savefig("./res_GAT_GCL.png",dpi=600)
I have tried to use DataParallel to spread my model and dataset over multiple GPUs, but that failed as well.
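Not a full answer, but one thing that usually helps when a graph does not fit on one GPU is neighbor-sampled mini-batches instead of full-batch training, so only a small subgraph sits on the GPU at a time. Below is a minimal sketch using PyG's NeighborLoader; the fan-out, batch size, and plain cross-entropy loss are placeholder choices (your ContrastiveLoss could be substituted back in), and it assumes the Data object fits in CPU RAM.

from torch_geometric.loader import NeighborLoader

# Sample a fixed number of neighbors per layer around the training nodes,
# so each batch is a small subgraph that fits on the GPU.
loader = NeighborLoader(
    data,
    num_neighbors=[15, 10],      # placeholder fan-out for the two GAT layers
    batch_size=1024,             # placeholder batch size
    input_nodes=data.train_mask,
)

def train_minibatch():
    model.train()
    total_loss = 0.0
    for batch in loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        out = model(batch.x, batch.edge_index)
        # Only the first batch.batch_size nodes are the seed nodes of this batch.
        loss = F.cross_entropy(out[:batch.batch_size], batch.y[:batch.batch_size])
        loss.backward()
        optimizer.step()
        total_loss += float(loss)
    return total_loss / len(loader)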

CVXPY crashes on kernelized lasso

I'm attempting to run CVXPY to solve a kernelized lasso regression.
When the number of predictors goes up (my goal is to have 3000 of them), it crashes with either a "Killed" error or a "bad alloc" error.
import cvxpy as cp
import numpy as np
import scipy
np.random.seed(0)
NUM_PREDICTORS = 500
NUM_SAMPLES = 1
l1 = 10
x = np.random.randn(NUM_SAMPLES, NUM_PREDICTORS)
y = np.random.randn(NUM_SAMPLES, 1)
xx = x.T @ x
yx = y.T @ x
xx_sqrt = scipy.linalg.sqrtm(xx)
b = cp.Variable(yx.T.shape)
u = cp.sum_squares(xx_sqrt @ b) - cp.sum(2 * yx @ b) + l1 * cp.norm(b, 1)
obj = cp.Minimize(u)
prob = cp.Problem(obj)
prob.solve()
print('done')
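A sketch of one change I would try, assuming the crash comes from canonicalizing the dense p-by-p matrices (sqrtm(X'X) is 3000 x 3000 at your target size): since ||Xb - y||^2 = b'X'Xb - 2y'Xb + const, the same minimizer can be found by stating the problem directly on the n-by-p data matrix, which avoids forming the Gram matrix and its square root.

# Equivalent lasso objective on the raw data matrix (sketch); with
# NUM_SAMPLES << NUM_PREDICTORS the expression stays n x p instead of p x p.
b = cp.Variable((NUM_PREDICTORS, 1))
u = cp.sum_squares(x @ b - y) + l1 * cp.norm(b, 1)
prob = cp.Problem(cp.Minimize(u))
prob.solve(verbose=True)
print('done')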

The gradients are all 0 after backward and the parameters did not change at all

I implemented the policy gradient method to learn an unknown function (a 10-loop sum function here), but the model does not update. The training data consists of the input and the target. func2 contains the MLP model that predicts the target number. The code is the following:
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
import torch.optim as opt
import torch
from torch.autograd.variable import Variable
class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(1, 8)
        self.fc2 = nn.Linear(8, 8)
        self.fc3 = nn.Linear(8, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = F.sigmoid(x)
        return x

def assertEqual(label, pre_number):
    reward = -torch.sum((label - pre_number).pow(2))
    return reward

def func_mulsum(model, s, scalar, n_loop):
    c = s
    for i in range(n_loop):
        c = model(c)
    return c

def train_test_data(n_loop):
    nums = 100
    rate = 0.7
    train_data = np.zeros((int(nums * rate), 2))
    test_data = np.zeros((int(nums * (1 - rate)), 2))
    data = random.sample(range(nums), nums)
    train = data[:int(nums * rate)]
    test = data[int(nums * rate):]
    train_data[:, 0] = train
    test_data[:, 0] = test
    for i, ans in enumerate(train):
        for j in range(n_loop):
            ans += j
        train_data[i, 1] = ans
    for i, ans1 in enumerate(test):
        for j in range(n_loop):
            ans1 += j
        test_data[i, 1] = ans1
    return train_data, test_data
if __name__ == '__main__':
    n_loop = 10
    iterations = 10
    learn_data, test_case = train_test_data(n_loop)
    model = MLP()
    optim = opt.SGD(model.parameters(), lr=0.05)
    for i in range(iterations):
        reward_sum = 0
        learn_data = torch.FloatTensor(learn_data)
        for j, data in enumerate(learn_data[:, 0]):
            data = data.unsqueeze(0)
            label = learn_data[j, 1]
            optim.zero_grad()
            pre = func_mulsum(model, data, 255, n_loop)
            p_norm = torch.normal(pre, std=0.000000001)
            reward = assertEqual(p_norm, label)
            # print(p_norm, label)
            loss = reward * (torch.log(p_norm) * -1)
            loss.backward()
            reward_sum += loss
            optim.step()
        for para in model.parameters():
            pass
        print(para)
        print('reward_mean............................', reward_sum)
I could not find the reason why the gradients are all 0. This question has confused me for 2 days. Can anyone help me?
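I am not certain this is the cause, but one thing that stands out is that the loss multiplies the reward by the log of the sampled value itself, and the reward tensor still carries gradients. In the usual REINFORCE formulation the gradient flows through the log-probability of the action under the policy distribution, with the reward detached. A minimal sketch with torch.distributions follows; the std of 0.1 is a placeholder, and pre, label, model, data, and n_loop are reused from the loop above.

from torch.distributions import Normal

# REINFORCE-style loss sketch: log-prob of the sampled action under the
# policy, weighted by a detached reward.
pre = func_mulsum(model, data, 255, n_loop)        # policy mean
dist = Normal(pre, torch.tensor(0.1))              # placeholder std
action = dist.sample()                             # sampling itself has no gradient
reward = -((label - action) ** 2).sum().detach()   # reward must not carry gradients
loss = -(dist.log_prob(action) * reward).sum()
loss.backward()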

Unable to write gradient step in theano for rnn

I have the following code in which I convert words to one-hot vectors and do gradient descent in Theano, using an RNN to predict the next word given a sequence of words (basically a language model).
# coding: utf-8
# In[68]:
#Importing stuff
import theano
import theano.tensor as T
import numpy as np
# In[69]:
import nltk
import sys
import operator
import csv
import itertools
from utils import *
from datetime import datetime
# In[70]:
#Fixing vocabulary size for one hot vectors and some initialization stuff
v_size = 8000
unknown_token = "UNKNOWN_TOKEN"
start_token = "<s>"
end_token = "</s>"
# In[71]:
#Read data and start preprocessing
with open('reddit-comments-2015-08.csv','rb') as f:
    reader = csv.reader(f, skipinitialspace=True)
    reader.next()
    sentences = list(itertools.chain(*[nltk.sent_tokenize(x[0].decode('utf-8')) for x in reader]))
print len(sentences)
# In[72]:
#Tokenize the sentences and add start and end tokens
tokenized_sentences = [nltk.word_tokenize(s) for s in sentences]
tokenized_sentences = [[start_token] + s + [end_token] for s in tokenized_sentences]
# In[73]:
#Get word frequencies and use only most frequent words in vocabulary
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
vocab = word_freq.most_common(v_size-1)
# In[74]:
#Do mapping and reverse mapping
index_to_word = [x[0] for x in vocab]
index_to_word.append(unknown_token)
word_to_index = {w:i for i,w in enumerate(index_to_word)}
#Removing less frequent words
for i, s in enumerate(tokenized_sentences):
    tokenized_sentences[i] = [w if w in word_to_index else unknown_token for w in s]
#Got vectors but they are not one hot
X_train = np.asarray([[word_to_index[w] for w in s[:-1]] for s in tokenized_sentences])
Y_train = np.asarray([[word_to_index[w] for w in s[1:]] for s in tokenized_sentences])
#Preprocessing ends here
# In[75]:
#Take only one sentence for now
X_train = X_train[0]
Y_train = Y_train[0]
# In[76]:
#Make input and output as onehot vectors. This can easily be replaced with vectors generated by word2vec.
X_train_onehot = np.eye(v_size)[X_train]
X = theano.shared(np.array(X_train_onehot).astype('float32'), name = 'X')
Y_train_onehot = np.eye(v_size)[Y_train]
Y = theano.shared(np.array(Y_train_onehot).astype('float32'), name = 'Y')
# In[77]:
#Initializing U, V and W
i_dim = v_size
h_dim = 100
o_dim = v_size
U = theano.shared(np.random.randn(i_dim, h_dim).astype('float32'), name = 'U')
W = theano.shared(np.random.randn(h_dim, h_dim).astype('float32'), name = 'W')
V = theano.shared(np.random.randn(h_dim, o_dim).astype('float32'), name = 'V')
# In[78]:
#forward propagation
s = T.vector('s')
results, updates = theano.scan(lambda x, sm1: T.tanh( T.dot(x, U) + T.dot(sm1, W)),
sequences = X_train_onehot,
outputs_info = s
)
y_hat = T.dot(results, V)
forward_propagation = theano.function(inputs=[s], outputs = y_hat)
# In[80]:
#loss
loss = T.sum(T.nnet.categorical_crossentropy(y_hat, Y))
# In[81]:
#Gradients
dw = T.grad(loss, W)
du = T.grad(loss, U)
dv = T.grad(loss, V)
# In[82]:
#BPTT
learning_rate = T.scalar('learning_rate')
gradient_step = theano.function(inputs = [s, learning_rate],
updates = (
(U, U - learning_rate * du),
(V, V - learning_rate * dv),
(W, W - learning_rate * dw)
)
)
# In[ ]:
But it keeps throwing an error at the gradient step. I am posting the full code because I don't know which step is causing the error. The following is the screenshot of the error in the Jupyter notebook.
I solved it. The problem was a type mismatch: I had to cast du, dv, dw, and the learning rate to float32. By default, they are float64.
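For reference, a minimal sketch of the kind of cast I mean, using standard Theano APIs (T.cast and an explicitly float32 scalar); the exact lines in my notebook may differ slightly, and it reuses du, dv, dw, U, V, W, and s from above.

# Cast the gradients and the learning rate to float32 so the update
# expressions match the float32 shared variables.
learning_rate = T.scalar('learning_rate', dtype='float32')
du32 = T.cast(du, 'float32')
dv32 = T.cast(dv, 'float32')
dw32 = T.cast(dw, 'float32')
gradient_step = theano.function(inputs=[s, learning_rate],
                                updates=[(U, U - learning_rate * du32),
                                         (V, V - learning_rate * dv32),
                                         (W, W - learning_rate * dw32)])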

Neural network model not learning?

I tried to model a NN using softmax regression.
After 999 iterations, I got an error of about 0.02% per data point, which I thought was good. But when I visualize the model in TensorBoard, my cost function does not approach 0; instead I got a plot like the one attached, and the weights and bias histograms look similar.
I am a beginner and I can't seem to understand the mistake. Maybe I am using a wrong method to define the cost?
Here is my full code for reference.
import tensorflow as tf
import numpy as np
import random
lorange= 1
hirange= 10
amplitude= np.random.uniform(-10,10)
t= 10
random.seed()
tau=np.random.uniform(lorange,hirange)
x_node = tf.placeholder(tf.float32, (10,))
y_node = tf.placeholder(tf.float32, (10,))
W = tf.Variable(tf.truncated_normal([10,10], stddev= .1))
b = tf.Variable(.1)
y = tf.nn.softmax(tf.matmul(tf.reshape(x_node,[1,10]), W) + b)
##ADD SUMMARY
W_hist = tf.histogram_summary("weights", W)
b_hist = tf.histogram_summary("biases", b)
y_hist = tf.histogram_summary("y", y)
# Cost function sum((y_-y)**2)
with tf.name_scope("cost") as scope:
cost = tf.reduce_mean(tf.square(y_node-y))
cost_sum = tf.scalar_summary("cost", cost)
# Training using Gradient Descent to minimize cost
with tf.name_scope("train") as scope:
train_step = tf.train.GradientDescentOptimizer(0.00001).minimize(cost)
sess = tf.InteractiveSession()
# Merge all the summaries and write them out to logfile
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("/tmp/mnist_logs_4", sess.graph_def)
error = tf.reduce_sum(tf.abs(y - y_node))
init = tf.initialize_all_variables()
sess.run(init)
steps = 1000
for i in range(steps):
    xs = np.arange(t)
    ys = amplitude * np.exp(-xs / tau)
    feed = {x_node: xs, y_node: ys}
    sess.run(train_step, feed_dict=feed)
    print("After %d iteration:" % i)
    print("W: %s" % sess.run(W))
    print("b: %s" % sess.run(b))
    print('Total Error: ', error.eval(feed_dict={x_node: xs, y_node: ys}))
    # Record summary data, and the accuracy every 10 steps
    if i % 10 == 0:
        result = sess.run(merged, feed_dict=feed)
        writer.add_summary(result, i)
I got the same plot as you a couple of times. That happened mostly when I was running TensorBoard on multiple log files, i.e. the logdir I gave to TensorBoard contained multiple log files. Try running TensorBoard on one single log file and let me know what happens.
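A minimal sketch of what I mean by a single log file per run, using the same TF 0.x-era API as in your code (the directory naming is just an example):

import time
# Give each training run its own fresh log directory so TensorBoard
# never mixes event files from different runs.
logdir = "/tmp/mnist_logs_4/run_%d" % int(time.time())
writer = tf.train.SummaryWriter(logdir, sess.graph_def)
# then point TensorBoard at exactly one run, e.g.:
#   tensorboard --logdir=/tmp/mnist_logs_4/run_1450000000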
