Trapezoidal collocation using CVXPY

I tried trapezoidal collocation to solve a double integrator control problem, implemented in CVXPY. This is a two-point boundary value problem. After solving, there is a jump in the control beyond the limits at the end of the horizon to meet the end condition, and I could not find the reason. I have attached the code below. Thanks in advance.
import numpy as np
from cvxpy import *
import matplotlib.pyplot as plt

np.random.seed(1)
n = 2            # state dimension
m = 1            # control dimension
T = 50           # number of collocation intervals
alpha = 0.02
beta = 5
# continuous-time double integrator matrices (the constraints below hard-code the same dynamics)
A = np.zeros((2, 2))
A[0, 1] = 1
B = np.zeros((2, 1))
B[1, 0] = 1
dt = 0.02
x_0 = np.zeros(2)

x = Variable((2, T + 1))
u = Variable((1, T + 1))

cost = 0
constr = []
for t in range(T):
    cost += sum_squares(u[:, t])
    # trapezoidal collocation of the dynamics over each interval
    constr += [x[0, t + 1] == x[0, t] + 0.5 * dt * (x[1, t + 1] + x[1, t])]
    constr += [x[1, t + 1] == x[1, t] + 0.5 * dt * (u[0, t + 1] + u[0, t])]
constr += [x[0, T] == 1, x[1, T] == 0]   # terminal condition
constr += [x[:, 0] == x_0]               # initial condition

problem = Problem(Minimize(cost), constr)
#problem.solve()
#problem.solve(solver='SCS', eps=1e-5, max_iters=1000, verbose=True)
problem.solve(verbose=True, solver='ECOS')

f = plt.figure()
ax = f.add_subplot(411)
plt.plot(u[0, :].value)
plt.subplot(4, 1, 3)
x1 = x[0, :].value
plt.plot(x1)
plt.subplot(4, 1, 4)
x2 = x[1, :].value
plt.plot(x2)
plt.show()
[Figure: states and control plot]
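A guess at the cause, based only on the snippet above: the objective sums sum_squares(u[:, t]) for t in range(T), so the final control sample u[:, T] is never penalized even though it appears in the last trapezoidal constraint. The solver is then free to make that one sample as large as it likes to hit the end condition. A minimal sketch of two possible remedies, reusing u and constr from the script above; u_max is a made-up limit, not something from the original post:

# Sketch only: penalize every control sample, including u[:, T],
# and optionally bound the control explicitly. u_max is hypothetical.
u_max = 5.0
cost = sum_squares(u)                    # all T+1 samples, not just range(T)
constr += [u <= u_max, u >= -u_max]      # elementwise control bounds
problem = Problem(Minimize(cost), constr)
problem.solve(solver='ECOS', verbose=True)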

Related

Implementing linear regression from scratch in python

I'm trying to implement linear regression in Python using the following gradient descent update rules (these are the forms after taking the partial derivatives):

slope:       w := w - a * (1/m) * sum_i (y_hat_i - y_i) * x_i
y_intercept: b := b - a * (1/m) * sum_i (y_hat_i - y_i)
but the code keeps giving me weird results; I think (though I'm not sure) that the error is in the gradient_descent function.
import numpy as np

class LinearRegression:
    def __init__(self , x:np.ndarray ,y:np.ndarray):
        self.x = x
        self.m = len(x)
        self.y = y

    def calculate_predictions(self ,slope:int , y_intercept:int) -> np.ndarray: # Calculate y hat.
        predictions = []
        for x in self.x:
            predictions.append(slope * x + y_intercept)
        return predictions

    def calculate_error_cost(self , y_hat:np.ndarray) -> int:
        error_valuse = []
        for i in range(self.m):
            error_valuse.append((y_hat[i] - self.y[i] )** 2)
        error = (1/(2*self.m)) * sum(error_valuse)
        return error

    def gradient_descent(self):
        costs = []
        # initialization values
        temp_w = 0
        temp_b = 0
        a = 0.001 # Learning rate
        while True:
            y_hat = self.calculate_predictions(slope=temp_w , y_intercept= temp_b)
            sum_w = 0
            sum_b = 0
            for i in range(len(self.x)):
                sum_w += (y_hat[i] - self.y[i] ) * self.x[i]
                sum_b += (y_hat[i] - self.y[i] )
            w = temp_w - a * ((1/self.m) *sum_w)
            b = temp_b - a * ((1/self.m) *sum_b)
            temp_w = w
            temp_b = b
            costs.append(self.calculate_error_cost(y_hat))
            try:
                if costs[-1] > costs[-2]: # If global minimum reached
                    return [w,b]
            except IndexError:
                pass
I used this dataset:
https://www.kaggle.com/datasets/tanuprabhu/linear-regression-dataset?resource=download
After downloading it, I load it like this:
import pandas
p = pandas.read_csv('linear_regression_dataset.csv')
l = LinearRegression(x= p['X'] , y= p['Y'])
print(l.gradient_descent())
But it's giving me [-568.1905905426412, -2.833321633515304], which is clearly not accurate.
I want to implement the algorithm without external modules like scikit-learn, for learning purposes.
I tested the calculate_error_cost function and it worked as expected, and I don't think there is an error in the calculate_predictions function.
One small problem you have is that you are returning the last values of w and b, when you should be returning the second-to-last parameters (because they yield a lower cost). This should not really matter that much... unless your learning rate is too high and you are immediately getting a higher cost on the second iteration. That, I believe, is your real problem, judging from the dataset you shared.
The algorithm does work on the dataset, but you need to change the learning rate. I ran it in the example below and it gave the result shown in the image. One caveat: I added a limit on the number of iterations to keep the algorithm from running too long while only marginally improving the result.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

class LinearRegression:
    def __init__(self , x:np.ndarray ,y:np.ndarray):
        self.x = x
        self.m = len(x)
        self.y = y

    def calculate_predictions(self ,slope:int , y_intercept:int) -> np.ndarray: # Calculate y hat.
        predictions = []
        for x in self.x:
            predictions.append(slope * x + y_intercept)
        return predictions

    def calculate_error_cost(self , y_hat:np.ndarray) -> int:
        error_valuse = []
        for i in range(self.m):
            error_valuse.append((y_hat[i] - self.y[i] )** 2)
        error = (1/(2*self.m)) * sum(error_valuse)
        return error

    def gradient_descent(self):
        costs = []
        # initialization values
        temp_w = 0
        temp_b = 0
        iteration = 0
        a = 0.00001 # Learning rate
        while iteration < 1000:
            y_hat = self.calculate_predictions(slope=temp_w , y_intercept= temp_b)
            sum_w = 0
            sum_b = 0
            for i in range(len(self.x)):
                sum_w += (y_hat[i] - self.y[i] ) * self.x[i]
                sum_b += (y_hat[i] - self.y[i] )
            w = temp_w - a * ((1/self.m) *sum_w)
            b = temp_b - a * ((1/self.m) *sum_b)
            costs.append(self.calculate_error_cost(y_hat))
            try:
                if costs[-1] > costs[-2]: # If global minimum reached
                    print(costs)
                    return [temp_w,temp_b]
            except IndexError:
                pass
            temp_w = w
            temp_b = b
            iteration += 1
        print(iteration)
        return [temp_w,temp_b]

p = pd.read_csv('linear_regression_dataset.csv')
x_data = p['X']
y_data = p['Y']
lin_reg = LinearRegression(x_data, y_data)
y_hat = lin_reg.calculate_predictions(*lin_reg.gradient_descent())

fig = plt.figure()
plt.plot(x_data, y_data, 'r.', label='Data')
plt.plot(x_data, y_hat, 'b-', label='Linear Regression')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
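A side note that is not part of the answer above: the very small learning rate is needed mostly because the raw X values are large, which makes the slope gradient large. An alternative sketch (assuming the same CSV and column names) standardizes x first, uses a more ordinary learning rate, and then maps the parameters back to the original scale:

import numpy as np
import pandas as pd

# Sketch only, not from the original post: fit on standardized x,
# then convert the parameters back to the original scale.
p = pd.read_csv('linear_regression_dataset.csv')
x = p['X'].to_numpy(dtype=float)
y = p['Y'].to_numpy(dtype=float)

mu, sigma = x.mean(), x.std()
x_s = (x - mu) / sigma

w_s, b_s = 0.0, 0.0
a = 0.1                      # a much larger rate is stable on standardized data
for _ in range(2000):
    y_hat = w_s * x_s + b_s
    err = y_hat - y
    w_s -= a * (err * x_s).mean()   # gradient of the squared-error cost w.r.t. slope
    b_s -= a * err.mean()           # gradient w.r.t. intercept

w = w_s / sigma              # slope on the original scale
b = b_s - w_s * mu / sigma   # intercept on the original scale
print(w, b)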

Perceptron algorithm is not working as I desired

I recently tried implementing the perceptron algorithm, but I am not getting the desired output.
Here is the code:
import numpy as np
import pandas as pd

with open("D:/data.txt",'r') as data: #importing the data
    column = data.read()
split = np.array(column.split('\n'))
final = []
for string in split:
    final.append(string.split(','))
df = pd.DataFrame(final, columns=['x','y','response'])
df['x'] = df['x'].astype(float)
df['y'] = df['y'].astype(float)
df['response'] = df['response'].astype(int)
X = np.array(df[['x','y']])
y = np.array(df['response'])

def perceptron_algorithm(x,y,learning_rate=0.01,num_epoch=25):
    np.random.seed(2)
    x_min, x_max = min(x.T[0]), max(x.T[0])
    y_min, y_max = min(x.T[1]), max(x.T[0])
    w = np.array(np.random.rand(2,1))
    b = np.random.rand(1)[0] + x_max
    print(w,b)
    for i in range(num_epoch):
        w,b = perceptronstep(x,y,w,b,learning_rate)
    print(w,b)
    return w,b

def perceptronstep(x,y,w,b,learning_rate):
    for i in range(len(x)):
        y_hat = prediction(x[i],w,b)
        if y_hat-y[i] == 1:
            for j in range(len(w)):
                w[j] += x[i][j]*learning_rate
            b += learning_rate
        elif y_hat-y[i] == -1:
            for j in range(len(w)):
                w[j] -= x[i][j]*learning_rate
            b -= learning_rate
    return w,b

def prediction(x,w,b):
    return step(np.matmul(x,w)+b)

def step(t):
    if t >= 0:
        return 1
    else:
        return 0

w,b = perceptron_algorithm(X,y)
This is the resulting line:
This is how the data looks:
Is there something wrong with my code?
Here is the link to the data file:
https://drive.google.com/drive/folders/1TSug9tE6bljyBFv-u3mIGWW6F_3ZY2oa?usp=sharing
Edit: I have added the initial part of the code so it will be clear what I am trying to do.
Edit 2: I have added the data file and the "import pandas as pd" line of code
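For comparison, here is a minimal sketch of the textbook perceptron step, written independently of the code above (the name perceptron_step_reference is illustrative only): a point predicted 1 but labelled 0 should push the boundary away from it, and a point predicted 0 but labelled 1 should pull it closer.

import numpy as np

def perceptron_step_reference(X, y, w, b, learning_rate=0.01):
    """Textbook perceptron update; a reference sketch, not the asker's code."""
    for xi, yi in zip(X, y):
        score = np.dot(xi, w).item() + b
        y_hat = 1 if score >= 0 else 0
        if y_hat == 1 and yi == 0:      # predicted 1, actual 0: push the boundary away
            w = w - learning_rate * xi.reshape(w.shape)
            b = b - learning_rate
        elif y_hat == 0 and yi == 1:    # predicted 0, actual 1: pull the boundary closer
            w = w + learning_rate * xi.reshape(w.shape)
            b = b + learning_rate
    return w, b

Comparing the signs of these updates with those in perceptronstep may help narrow down the issue.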

CVXPY crashes on kernelized lasso

I'm attempting to run CVXPY to solve a kernelized lasso regression.
When the number of predictors goes up (my goal is to have 3000 of them), it crashes with either a "Killed" error or a "bad alloc" error.
import cvxpy as cp
import numpy as np
import scipy
np.random.seed(0)
NUM_PREDICTORS = 500
NUM_SAMPLES = 1
l1 = 10
x = np.random.randn(NUM_SAMPLES, NUM_PREDICTORS)
y = np.random.randn(NUM_SAMPLES, 1)
xx = x.T @ x
yx = y.T @ x
xx_sqrt = scipy.linalg.sqrtm(xx)
b = cp.Variable(yx.T.shape)
u = cp.sum_squares(xx_sqrt @ b) - cp.sum(2 * yx @ b) + l1 * cp.norm(b, 1)
obj = cp.Minimize(u)
prob = cp.Problem(obj)
prob.solve()
print('done')
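A guess at a workaround rather than a confirmed fix: when the raw matrix x is available (as in the snippet), the quadratic term b'(x'x)b equals ||x b||^2, so the objective can be written in factored form without ever materializing the dense p-by-p matrices xx and xx_sqrt, which is where most of the memory is likely going. A sketch under that assumption:

import cvxpy as cp
import numpy as np

np.random.seed(0)
NUM_PREDICTORS = 3000
NUM_SAMPLES = 1
l1 = 10

x = np.random.randn(NUM_SAMPLES, NUM_PREDICTORS)
y = np.random.randn(NUM_SAMPLES, 1)

b = cp.Variable((NUM_PREDICTORS, 1))
# Same objective as above, in factored form: ||x b||^2 - 2 (y'x) b + l1 ||b||_1
u = cp.sum_squares(x @ b) - cp.sum(2 * (y.T @ x) @ b) + l1 * cp.norm(b, 1)
prob = cp.Problem(cp.Minimize(u))
prob.solve()
print('done')

If only the kernel matrix is available and not x itself, this factoring does not apply directly.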

Oversampling or SMOTE in Pyspark

I have 7 classes and the total number of records is 115, and I want to run a Random Forest model on this data. But the data is not enough to get high accuracy, so I want to apply oversampling over all the classes in a way that the majority class itself gets a higher count and the minority classes are scaled up accordingly. Is this possible in PySpark?
+---------+-----+
| SubTribe|count|
+---------+-----+
| Chill| 10|
| Cool| 18|
|Adventure| 18|
| Quirk| 13|
| Mystery| 25|
| Party| 18|
|Glamorous| 13|
+---------+-----+
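(Aside, not from the original thread: if plain duplication is acceptable rather than SMOTE, every class can be resampled with replacement up to a chosen count using only DataFrame operations. The sketch below assumes a DataFrame df with the SubTribe column shown above; oversample_all_classes and target_per_class are made-up names.)

from pyspark.sql import functions as F

def oversample_all_classes(df, label_col="SubTribe", target_per_class=50, seed=42):
    """Naive random oversampling with replacement; a sketch, not SMOTE."""
    counts = {row[label_col]: row["count"]
              for row in df.groupBy(label_col).count().collect()}
    parts = []
    for label, cnt in counts.items():
        cls_df = df.filter(F.col(label_col) == label)
        # a fraction > 1 with replacement duplicates rows to roughly the target size
        fraction = target_per_class / float(cnt)
        parts.append(cls_df.sample(withReplacement=True, fraction=fraction, seed=seed))
    out = parts[0]
    for p in parts[1:]:
        out = out.unionByName(p)
    return out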
Here is another implementation of PySpark and Scala SMOTE that I have used in the past. I have copied the code across and referenced the source because it's quite small:
Pyspark:
import random
import numpy as np
from pyspark.sql import Row
from sklearn import neighbors
from pyspark.ml.feature import VectorAssembler

def vectorizerFunction(dataInput, TargetFieldName):
    if(dataInput.select(TargetFieldName).distinct().count() != 2):
        raise ValueError("Target field must have only 2 distinct classes")
    columnNames = list(dataInput.columns)
    columnNames.remove(TargetFieldName)
    dataInput = dataInput.select((','.join(columnNames)+','+TargetFieldName).split(','))
    assembler = VectorAssembler(inputCols = columnNames, outputCol = 'features')
    pos_vectorized = assembler.transform(dataInput)
    vectorized = pos_vectorized.select('features',TargetFieldName).withColumn('label',pos_vectorized[TargetFieldName]).drop(TargetFieldName)
    return vectorized

def SmoteSampling(vectorized, k = 5, minorityClass = 1, majorityClass = 0, percentageOver = 200, percentageUnder = 100):
    if(percentageUnder > 100 | percentageUnder < 10):
        raise ValueError("Percentage Under must be in range 10 - 100")
    if(percentageOver < 100):
        raise ValueError("Percentage Over must be at least 100")
    dataInput_min = vectorized[vectorized['label'] == minorityClass]
    dataInput_maj = vectorized[vectorized['label'] == majorityClass]
    feature = dataInput_min.select('features')
    feature = feature.rdd
    feature = feature.map(lambda x: x[0])
    feature = feature.collect()
    feature = np.asarray(feature)
    nbrs = neighbors.NearestNeighbors(n_neighbors=k, algorithm='auto').fit(feature)
    neighbours = nbrs.kneighbors(feature)
    gap = neighbours[0]
    neighbours = neighbours[1]
    min_rdd = dataInput_min.drop('label').rdd
    pos_rddArray = min_rdd.map(lambda x : list(x))
    pos_ListArray = pos_rddArray.collect()
    min_Array = list(pos_ListArray)
    newRows = []
    nt = len(min_Array)
    nexs = int(percentageOver / 100)  # integer number of synthetic copies per minority row
    for i in range(nt):
        for j in range(nexs):
            neigh = random.randint(1, k)
            difs = min_Array[neigh][0] - min_Array[i][0]
            newRec = (min_Array[i][0] + random.random() * difs)
            newRows.insert(0, (newRec))
    newData_rdd = sc.parallelize(newRows)
    newData_rdd_new = newData_rdd.map(lambda x: Row(features = x, label = 1))
    new_data = newData_rdd_new.toDF()
    new_data_minor = dataInput_min.unionAll(new_data)
    new_data_major = dataInput_maj.sample(False, (float(percentageUnder)/float(100)))
    return new_data_major.unionAll(new_data_minor)

dataInput = spark.read.format('csv').options(header='true', inferSchema='true').load("sam.csv").dropna()
SmoteSampling(vectorizerFunction(dataInput, 'Y'), k = 2, minorityClass = 1, majorityClass = 0, percentageOver = 90, percentageUnder = 5)
Scala:
// Import the necessary packages
import org.apache.spark.ml.feature.BucketedRandomProjectionLSH
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.expressions.Window
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.sql.functions.rand
import org.apache.spark.sql.functions._

object smoteClass{

  def KNNCalculation(
      dataFinal:org.apache.spark.sql.DataFrame,
      feature:String,
      reqrows:Int,
      BucketLength:Int,
      NumHashTables:Int):org.apache.spark.sql.DataFrame = {
    val b1 = dataFinal.withColumn("index", row_number().over(Window.partitionBy("label").orderBy("label")))
    val brp = new BucketedRandomProjectionLSH().setBucketLength(BucketLength).setNumHashTables(NumHashTables).setInputCol(feature).setOutputCol("values")
    val model = brp.fit(b1)
    val transformedA = model.transform(b1)
    val transformedB = model.transform(b1)
    val b2 = model.approxSimilarityJoin(transformedA, transformedB, 2000000000.0)
    require(b2.count > reqrows, println("Change bucket lenght or reduce the percentageOver"))
    val b3 = b2.selectExpr("datasetA.index as id1",
      "datasetA.feature as k1",
      "datasetB.index as id2",
      "datasetB.feature as k2",
      "distCol").filter("distCol>0.0").orderBy("id1", "distCol").dropDuplicates().limit(reqrows)
    return b3
  }

  def smoteCalc(key1: org.apache.spark.ml.linalg.Vector, key2: org.apache.spark.ml.linalg.Vector)={
    val resArray = Array(key1, key2)
    val res = key1.toArray.zip(key2.toArray.zip(key1.toArray).map(x => x._1 - x._2).map(_*0.2)).map(x => x._1 + x._2)
    resArray :+ org.apache.spark.ml.linalg.Vectors.dense(res)}

  def Smote(
      inputFrame:org.apache.spark.sql.DataFrame,
      feature:String,
      label:String,
      percentOver:Int,
      BucketLength:Int,
      NumHashTables:Int):org.apache.spark.sql.DataFrame = {
    val groupedData = inputFrame.groupBy(label).count
    require(groupedData.count == 2, println("Only 2 labels allowed"))
    val classAll = groupedData.collect()
    val minorityclass = if (classAll(0)(1).toString.toInt > classAll(1)(1).toString.toInt) classAll(1)(0).toString else classAll(0)(0).toString
    val frame = inputFrame.select(feature,label).where(label + " == " + minorityclass)
    val rowCount = frame.count
    val reqrows = (rowCount * (percentOver/100)).toInt
    val md = udf(smoteCalc _)
    val b1 = KNNCalculation(frame, feature, reqrows, BucketLength, NumHashTables)
    val b2 = b1.withColumn("ndtata", md($"k1", $"k2")).select("ndtata")
    val b3 = b2.withColumn("AllFeatures", explode($"ndtata")).select("AllFeatures").dropDuplicates
    val b4 = b3.withColumn(label, lit(minorityclass).cast(frame.schema(1).dataType))
    return inputFrame.union(b4).dropDuplicates
  }
}
Source
Maybe this project can be useful for your goal:
Spark SMOTE
But I think that 115 records aren't enough for a random forest. You could use a simpler technique, such as a decision tree.
You can check this answer:
Is Random Forest suitable for very small data sets?

Cost value doesn't converge

I'm trying to code a logistic regression, but I'm having trouble getting the cost to converge. Can anyone help me? Below is my code. Thank you!
import numpy as np

# input: we have 3 training examples and each of them has 4 features
# (sorry, I know it looks weird here). Y is a label matrix.
m, n = 3, 4
X = np.array([[1,2,1],[1,1,0],[1,2,1],[1,0,2]])
Y = np.array([[0,1,0]])
h = 100000 # iterations
alpha = 0.05 # learning rate
b = 0 # scalar bias
W = np.zeros(n).reshape(1,n) # weights
J = np.zeros(h).reshape(1,h) # a vector for holding cost values
Yhat = np.zeros(m).reshape(1,m) # predicted values

def activation(yhat):
    return 1/(1+np.exp(-yhat))

W = W.T
for g in range(h):
    m = X.T.shape[0]
    Y_hat = activation(X.dot(W)+b)
    cost = -1/m * np.sum(Y*np.log(Y_hat)+(1-Y)*np.log(1-Y_hat))
    current_error = Y.T - Y_hat
    dW = 1/m * np.dot(X.T, current_error)
    db = 1/m * np.sum(current_error)
    W = W + alpha * dW
    b = b + alpha * db
    J[0][g] = cost
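For reference only, not a fix applied to the exact code above: a minimal logistic-regression loop using the conventional layout in which rows of X are examples, so every matrix product lines up. It reuses the same toy numbers, transposed so X has shape (3, 4).

import numpy as np

# A reference sketch, not the asker's code: rows of X are examples (m, n).
X = np.array([[1, 2, 1],
              [1, 1, 0],
              [1, 2, 1],
              [1, 0, 2]]).T          # -> shape (3, 4): 3 examples, 4 features
Y = np.array([[0, 1, 0]]).T          # -> shape (3, 1)
m, n = X.shape

W = np.zeros((n, 1))
b = 0.0
alpha = 0.05

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

for _ in range(10000):
    Y_hat = sigmoid(X @ W + b)                       # (3, 1) predictions
    cost = -np.mean(Y * np.log(Y_hat) + (1 - Y) * np.log(1 - Y_hat))
    dW = (X.T @ (Y_hat - Y)) / m                     # gradient of the cost w.r.t. W
    db = np.mean(Y_hat - Y)                          # gradient w.r.t. b
    W -= alpha * dW
    b -= alpha * db

print(cost, W.ravel(), b)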
