I ran this code on the famous Iris flowers problem, performing 10-fold cross-validation and then classifying the data with 5 different classification methods.
This should make each classifier train on 135 instances and test on 15, ten times, so I expect wrongly classified instances + correctly classified instances = 15.
The code and the output follow.
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;

import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.evaluation.NominalPrediction;
import weka.classifiers.rules.DecisionTable;
import weka.classifiers.rules.OneR;
import weka.classifiers.rules.PART;
import weka.classifiers.trees.DecisionStump;
import weka.classifiers.trees.J48;
import weka.core.FastVector;
import weka.core.Instances;

public class WekaTest {

    public static void main(String[] args) throws Exception {
        BufferedReader datafile = readDataFile("C:\\Program Files\\Weka-3-8\\data\\iris.arff");
        //BufferedReader datafile = readDataFile("C:\\hwork\\titanic\\train.arff");
        Instances data = new Instances(datafile);
        data.setClassIndex(data.numAttributes() - 1);

        // Choose a type of validation split
        Instances[][] split = crossValidationSplit(data, 10);

        // Separate split into training and testing arrays
        Instances[] trainingSplits = split[0];
        Instances[] testingSplits = split[1];

        // Choose a set of classifiers
        Classifier[] models = { new J48(),
                                new PART(),
                                new DecisionTable(),
                                new OneR(),
                                new DecisionStump() };

        // Run for each classifier model
        // predictions[model][fold][0 = correct, 1 = incorrect]
        double[][][] predictions = new double[100][100][2];
        for (int j = 0; j < models.length; j++) {
            for (int i = 0; i < trainingSplits.length; i++) {
                Evaluation validation = new Evaluation(trainingSplits[i]);
                models[j].buildClassifier(trainingSplits[i]);
                validation.evaluateModel(models[j], testingSplits[i]);
                predictions[j][i][0] = validation.correct();
                predictions[j][i][1] = validation.incorrect();
                System.out.println("Classifier: "+models[j].getClass()+" : Correct: "+predictions[j][i][0]+", Wrong: "+predictions[i][j][1]);
            }//training foreach fold.
            System.out.println("===================================================================");
        }//training foreach classifier.
    }//main().

    public static BufferedReader readDataFile(String filename) {
        BufferedReader inputReader = null;
        try {
            inputReader = new BufferedReader(new FileReader(filename));
        } catch (FileNotFoundException ex) {
            System.err.println("File not found: " + filename);
        }
        return inputReader;
    }//readDataFile().

    public static Evaluation simpleClassify(Classifier model, Instances trainingSet, Instances testingSet) throws Exception {
        Evaluation validation = new Evaluation(trainingSet);
        model.buildClassifier(trainingSet);
        validation.evaluateModel(model, testingSet);
        return validation;
    }//simpleClassify().

    public static double calculateAccuracy(FastVector predictions) {
        double correct = 0;
        for (int i = 0; i < predictions.size(); i++) {
            NominalPrediction np = (NominalPrediction) predictions.elementAt(i);
            if (np.predicted() == np.actual()) {
                correct++;
            }
        }
        return 100 * correct / predictions.size();
    }//calculateAccuracy().

    public static Instances[][] crossValidationSplit(Instances data, int numberOfFolds) {
        Instances[][] split = new Instances[2][numberOfFolds];
        for (int i = 0; i < numberOfFolds; i++) {
            split[0][i] = data.trainCV(numberOfFolds, i);
            split[1][i] = data.testCV(numberOfFolds, i);
        }
        return split;
    }//crossValidationSplit().
}//class.
====================
The output:
Classifier: class weka.classifiers.trees.J48 : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.J48 : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.J48 : Correct: 14.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.J48 : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.J48 : Correct: 14.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.J48 : Correct: 13.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.J48 : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.J48 : Correct: 13.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.J48 : Correct: 12.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.J48 : Correct: 15.0, Wrong: 0.0
===================================================================
Classifier: class weka.classifiers.rules.PART : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.PART : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.PART : Correct: 14.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.PART : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.PART : Correct: 14.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.PART : Correct: 13.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.PART : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.PART : Correct: 13.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.PART : Correct: 9.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.PART : Correct: 13.0, Wrong: 0.0
===================================================================
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 15.0, Wrong: 1.0
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 15.0, Wrong: 1.0
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 13.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 13.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 13.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 12.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.DecisionTable : Correct: 14.0, Wrong: 0.0
===================================================================
Classifier: class weka.classifiers.rules.OneR : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.OneR : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.OneR : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.OneR : Correct: 14.0, Wrong: 1.0
Classifier: class weka.classifiers.rules.OneR : Correct: 13.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.OneR : Correct: 12.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.OneR : Correct: 15.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.OneR : Correct: 14.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.OneR : Correct: 14.0, Wrong: 0.0
Classifier: class weka.classifiers.rules.OneR : Correct: 14.0, Wrong: 0.0
===================================================================
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 15.0, Wrong: 1.0
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 15.0, Wrong: 1.0
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 15.0, Wrong: 2.0
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 5.0, Wrong: 2.0
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 0.0, Wrong: 15.0
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 0.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 5.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 0.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 0.0, Wrong: 0.0
Classifier: class weka.classifiers.trees.DecisionStump : Correct: 0.0, Wrong: 0.0
===================================================================
In the print line
System.out.println("Classifier: "+models[j].getClass()+" : Correct: "+predictions[j][i][0]+", Wrong: "+predictions[i][j][1]);
the following part
Wrong: "+predictions[i][j][1]);
should be
Wrong: "+predictions[j][i][1]);
You switched j and i. Because of the swap, the print statement reads entries of the predictions array that belong to a different model and fold, many of which have not been written yet and are still 0.0; that is why the Wrong column almost never matches 15 - Correct.
I need to iterate over a list of objects and sum all "num" values for every "title".
void main() {
  var obj1 = MyObject(tNum: 10, tTitle: 'Hi');
  var obj2 = MyObject(tNum: 9, tTitle: 'Hi');
  var obj3 = MyObject(tNum: 8, tTitle: 'Hello');
  var obj4 = MyObject(tNum: 7, tTitle: 'Hello');
  var obj5 = MyObject(tNum: 12, tTitle: 'Good');
  var myList = [obj1, obj2, obj3, obj4, obj5];
  var myListIter = myList.iterator;
  var initialTitle = '';
  var finalSum = 0;
  while (myListIter.moveNext()) {
    print(myListIter.current);
    if (myListIter.current.title!.compareTo(initialTitle).isEven) {
      finalSum = finalSum + myListIter.current.num!;
    } else {
      print('--- ${myListIter.current.title!} : $finalSum');
    }
  }
}

class MyObject {
  int? num;
  String? title;
  MyObject({tNum, tTitle}) {
    num = tNum;
    title = tTitle;
  }
}
The output:
--- Hi : 0
--- Hi : 0
--- Hello : 0
--- Hello : 0
--- Good : 0
Expected output:
--- Hi : 19
--- Hello : 15
--- Good : 12
What is the simplest way to do that?
Build a map keyed by title and accumulate the sums into it:
var myMap = <String, int>{};
myList.forEach((obj) => myMap[obj.title!] = (myMap[obj.title] ?? 0) + obj.num!);
print(myMap);
Output:
{Hi: 19, Hello: 15, Good: 12}
I have trained my first CNN model. I took the first 100 images of cats and the first 100 images of dogs from the Kaggle dataset as my custom dataset.
After the model is trained, I feed the same images back into the model to see the predictions. As a result I get scores between 0.5 and 0.6 on all images, while I thought they should be < 0.5 for cats and > 0.5 for dogs. Is it a problem with my model architecture, with the training process, or is my dataset just too small? Why does no image score below 0.5 at all?
Here is my code:
First I generate the .csv file to be processed:
import pandas as pd
import os
import torch

device = ("cuda" if torch.cuda.is_available() else "cpu")
train_df = pd.DataFrame(columns=["img_name", "label"])
train_df["img_name"] = os.listdir("train/")
for idx, i in enumerate(os.listdir("train/")):
    if "cat" in i:
        train_df["label"][idx] = 0
    if "dog" in i:
        train_df["label"][idx] = 1
train_df.to_csv(r'train_csv.csv', index=False, header=True)
Then I prepare the dataset:
from torch.utils.data import Dataset
import pandas as pd
import os
from PIL import Image
import torch

class CatsAndDogsDataset(Dataset):
    def __init__(self, root_dir, annotation_file, transform=None):
        self.root_dir = root_dir
        self.annotations = pd.read_csv(annotation_file)
        self.transform = transform

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        img_id = self.annotations.iloc[index, 0]
        img = Image.open(os.path.join(self.root_dir, img_id)).convert("RGB")
        y_label = torch.tensor(float(self.annotations.iloc[index, 1]))
        if self.transform is not None:
            img = self.transform(img)
        return (img, y_label)
This is my model:
import torch.nn as nn
import torchvision.models as models

class CNN(nn.Module):
    def __init__(self, train_CNN=False, num_classes=1):
        super(CNN, self).__init__()
        self.train_CNN = train_CNN
        self.inception = models.inception_v3(pretrained=True, aux_logits=False)
        self.inception.fc = nn.Linear(self.inception.fc.in_features, num_classes)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self.sigmoid = nn.Sigmoid()

    def forward(self, images):
        features = self.inception(images)
        return self.sigmoid(self.dropout(self.relu(features))).squeeze(1)
These are my hyperparameters, transformations, and dataloaders:
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
num_epochs = 10
learning_rate = 0.00001
train_CNN = False
batch_size = 32
shuffle = True
pin_memory = True
num_workers = 0
transform = transforms.Compose(
    [
        transforms.Resize((356, 356)),
        transforms.CenterCrop((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
    ]
)
dataset = CatsAndDogsDataset("train", "train_csv.csv", transform=transform)
train_size = int(0.8 * len(dataset))
validation_size = len(dataset) - train_size
train_set, validation_set = torch.utils.data.random_split(dataset, [train_size, validation_size])
train_loader = DataLoader(dataset=train_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory)
validation_loader = DataLoader(dataset=validation_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory)

model = CNN().to(device)
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Freeze everything except the final fully connected layer.
for name, param in model.inception.named_parameters():
    if "fc.weight" in name or "fc.bias" in name:
        param.requires_grad = True
    else:
        param.requires_grad = train_CNN
and the accuracy check:
def check_accuracy(loader, model):
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            scores = model(x)
            predictions = torch.tensor([1.0 if i >= 0.5 else 0.0 for i in scores]).to(device)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
    model.train()
    return f"{float(num_correct)/float(num_samples)*100:.2f}"
And this is my training function:
from tqdm import tqdm

def train():
    model.train()
    for epoch in range(num_epochs):
        loop = tqdm(train_loader, total=len(train_loader), leave=True)
        for imgs, labels in loop:
            imgs = imgs.to(device)
            labels = labels.to(device)
            outputs = model(imgs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loop.set_description(f"Epoch [{epoch}/{num_epochs}]")
            loop.set_postfix(loss=loss.item(), val_acc=check_accuracy(validation_loader, model))

if __name__ == "__main__":
    train()
Epoch [0/10]: 100%|██████████| 6/6 [12:00<00:00, 120.10s/it, loss=0.652, val_acc=39.02]
Epoch [1/10]: 100%|██████████| 6/6 [11:51<00:00, 118.61s/it, loss=0.497, val_acc=39.02]
Epoch [2/10]: 100%|██████████| 6/6 [11:27<00:00, 114.51s/it, loss=0.693, val_acc=39.02]
Epoch [3/10]: 100%|██████████| 6/6 [11:04<00:00, 110.77s/it, loss=0.531, val_acc=39.02]
Epoch [4/10]: 100%|██████████| 6/6 [10:58<00:00, 109.68s/it, loss=0.693, val_acc=39.02]
Epoch [5/10]: 100%|██████████| 6/6 [12:03<00:00, 120.51s/it, loss=0.803, val_acc=39.02]
Epoch [6/10]: 100%|██████████| 6/6 [11:33<00:00, 115.62s/it, loss=0.693, val_acc=39.02]
Epoch [7/10]: 100%|██████████| 6/6 [11:27<00:00, 114.56s/it, loss=0.675, val_acc=39.02]
Epoch [8/10]: 100%|██████████| 6/6 [11:42<00:00, 117.10s/it, loss=0.806, val_acc=39.02]
Epoch [9/10]: 100%|██████████| 6/6 [12:15<00:00, 122.58s/it, loss=0.768, val_acc=39.02]
Then I loop over the dataset, checking the model's prediction on each image (the dataset variable is available because it is in the same Jupyter notebook):
import numpy as np

with torch.no_grad():
    for index in range(len(dataset)):
        item = dataset[index]
        image_tensor = item[0]
        true_target = item[1]
        img_np = np.array(image_tensor)
        img_normalized = img_np.transpose(1, 2, 0)
        image = torch.unsqueeze(image_tensor, 0)
        prediction = model(image)
        predicted_class = prediction[0]
        print("class: " + str(true_target.item()) + " score: " + str(predicted_class.item()))
The output:
class: 0.0 score: 0.547210156917572
class: 0.0 score: 0.5
class: 0.0 score: 0.5348594188690186
class: 0.0 score: 0.5336627960205078
class: 0.0 score: 0.5178861618041992
class: 0.0 score: 0.5692692995071411
class: 0.0 score: 0.5
class: 0.0 score: 0.5381814241409302
class: 0.0 score: 0.54604572057724
class: 0.0 score: 0.5157472491264343
class: 0.0 score: 0.5257323980331421
class: 0.0 score: 0.5137990713119507
class: 0.0 score: 0.5247158408164978
class: 0.0 score: 0.5320644378662109
class: 0.0 score: 0.5775637626647949
class: 0.0 score: 0.528205156326294
class: 0.0 score: 0.5457945466041565
class: 0.0 score: 0.5301501154899597
class: 0.0 score: 0.5102765560150146
class: 0.0 score: 0.5069065690040588
class: 0.0 score: 0.519408106803894
class: 0.0 score: 0.5414850115776062
class: 0.0 score: 0.5041879415512085
class: 0.0 score: 0.5055546760559082
... (output truncated) ...
class: 1.0 score: 0.5
class: 1.0 score: 0.5
class: 1.0 score: 0.5166758894920349
class: 1.0 score: 0.5343206524848938
class: 1.0 score: 0.5716230869293213
So no image ever gets predicted as a cat.
Can you change your model architecture to this (just remove the dropout and ReLU)?
import torch.nn as nn
import torchvision.models as models

class CNN(nn.Module):
    def __init__(self, train_CNN=False, num_classes=1):
        super(CNN, self).__init__()
        self.train_CNN = train_CNN
        self.inception = models.inception_v3(pretrained=True, aux_logits=False)
        self.inception.fc = nn.Linear(self.inception.fc.in_features, num_classes)
        self.dropout = nn.Dropout(0.5)
        self.sigmoid = nn.Sigmoid()

    def forward(self, images):
        features = self.inception(images)
        return self.sigmoid(features).squeeze(1)
Also, try calling model.eval() before doing inference, since you have used dropout.
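A closely related option, shown here only as a minimal sketch: keep the head as raw logits and train with nn.BCEWithLogitsLoss (which applies the sigmoid internally and is numerically stabler than sigmoid followed by BCELoss), applying the sigmoid only at inference time. The BinaryHead class and the dummy tensors below are hypothetical stand-ins for the inception wrapper in the question.

import torch
import torch.nn as nn

# Hypothetical stand-in for the inception-based model in the question.
class BinaryHead(nn.Module):
    def __init__(self, in_features=2048):
        super().__init__()
        self.fc = nn.Linear(in_features, 1)

    def forward(self, x):
        # Return raw logits: no sigmoid, ReLU, or dropout on the output.
        return self.fc(x).squeeze(1)

model = BinaryHead()
criterion = nn.BCEWithLogitsLoss()  # applies the sigmoid internally

# One training step on dummy data, standing in for real features/labels.
x = torch.randn(4, 2048)
y = torch.tensor([0.0, 1.0, 1.0, 0.0])
loss = criterion(model(x), y)
loss.backward()

# Inference: switch to eval mode (disables dropout), then take the sigmoid
# of the logits to get probabilities in [0, 1].
model.eval()
with torch.no_grad():
    probs = torch.sigmoid(model(x))

Either way the key points are the same: nothing between the final linear layer and the loss except (at most) a sigmoid, and model.eval() before prediction.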
I made a simple PyTorch MLP (a GAN generator) and converted it to ONNX following this tutorial (https://www.youtube.com/watch?v=Vs730jsRgO8). My code is a bit different, but I can't catch the error.
class Generator(nn.Module):
    def __init__(self, g_input_dim, g_output_dim):
        super(Generator, self).__init__()
        # g_input = 100
        self.net = nn.Sequential(
            nn.Linear(g_input_dim, 256),
            nn.LeakyReLU(.2),
            nn.Linear(256, 512),
            nn.LeakyReLU(.2),
            nn.Linear(512, 1024),
            nn.LeakyReLU(.2),
            nn.Linear(1024, 784),
            nn.Tanh()
        )

    # forward method
    def forward(self, x):
        return self.net(x)
After training I export the model to ONNX:
torch.save(G.state_dict(), "pytorch_model.pth")
import torch.onnx
model = Generator(z_dim,mnist_dim)
state_dict = torch.load("pytorch_model.pth")
model.load_state_dict(state_dict)
model.eval()
dummy_input = torch.zeros(100)
torch.onnx.export(model, dummy_input, "onnx_model.onnx", verbose=True)
This gives the following ONNX graph, which seems accurate:
graph(%input.1 : Float(100),
%net.0.bias : Float(256),
%net.2.bias : Float(512),
%net.4.bias : Float(1024),
%net.6.bias : Float(784),
%25 : Float(100, 256),
%26 : Float(256, 512),
%27 : Float(512, 1024),
%28 : Float(1024, 784)):
%10 : Float(256) = onnx::MatMul(%input.1, %25) # /usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1612:0
%11 : Float(256) = onnx::Add(%10, %net.0.bias)
%12 : Float(256) = onnx::LeakyRelu[alpha=0.20000000000000001](%11) # /usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1239:0
%14 : Float(512) = onnx::MatMul(%12, %26) # /usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1612:0
%15 : Float(512) = onnx::Add(%14, %net.2.bias)
%16 : Float(512) = onnx::LeakyRelu[alpha=0.20000000000000001](%15) # /usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1239:0
%18 : Float(1024) = onnx::MatMul(%16, %27) # /usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1612:0
%19 : Float(1024) = onnx::Add(%18, %net.4.bias)
%20 : Float(1024) = onnx::LeakyRelu[alpha=0.20000000000000001](%19) # /usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1239:0
%22 : Float(784) = onnx::MatMul(%20, %28) # /usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1612:0
%23 : Float(784) = onnx::Add(%22, %net.6.bias)
%24 : Float(784) = onnx::Tanh(%23)
  return (%24)
Then I load the model from JavaScript:
<html>
<body>
  <script src="./onnx.min.js"></script>
  <script>
    async function test() {
      const sess = new onnx.InferenceSession()
      await sess.loadModel('./onnx_model.onnx')
      const input = new onnx.Tensor(new Float32Array(100), 'float32', [100])
      const outputMap = await sess.run([input])
      const outputTensor = outputMap.values().next().value
      console.log(`Output tensor: ${outputTensor.data}`)
    }
    test()
  </script>
</body>
</html>
I know the input dimension is correct, but ONNX.js gives me the following error:
onnx.min.js:8 Uncaught (in promise) Error: Can't use matmul on the given tensors
at e.createProgramInfo (onnx.min.js:8)
at t.run (onnx.min.js:8)
at e.run (onnx.min.js:8)
at t.<anonymous> (onnx.min.js:14)
at onnx.min.js:14
at Object.next (onnx.min.js:14)
at onnx.min.js:14
at new Promise (<anonymous>)
at r (onnx.min.js:14)
at onnx.min.js:14
I also know that MatMul is a supported ONNX operator, but I can't figure out how, or whether, my input tensor is correct.
I think the MatMul operator expects the input to be 2-dimensional. It seems to work when I add a batch-size dimension to the input (a batch size of 1):
Before: dummy_input = torch.zeros(100)
After: dummy_input = torch.zeros(1, 100)
Before: const input = new onnx.Tensor(new Float32Array(100), 'float32', [100])
After: const input = new onnx.Tensor(new Float32Array(100), 'float32', [1, 100])
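For reference, a sketch of the full export with the batch dimension added; it reuses the Generator class from the question, and the input_names/output_names/dynamic_axes arguments are optional extras (the dynamic axis lets the batch size vary at inference time):

import torch

model = Generator(100, 784)  # z_dim = 100, mnist_dim = 784, as in the question
model.load_state_dict(torch.load("pytorch_model.pth"))
model.eval()

dummy_input = torch.zeros(1, 100)  # batch dimension of 1 added
torch.onnx.export(
    model,
    dummy_input,
    "onnx_model.onnx",
    verbose=True,
    input_names=["z"],
    output_names=["img"],
    # Optional: allow arbitrary batch sizes at inference time.
    dynamic_axes={"z": {0: "batch"}, "img": {0: "batch"}},
)

The JavaScript tensor shape then has to match, as in the After line above: new onnx.Tensor(new Float32Array(100), 'float32', [1, 100]).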
I'm trying to create a game which consists of several areas, accessed by moving the circle onto the boundaries of the screen. I created a transit widget and defined a function to switch screens when there's a collision, but it keeps giving errors. The error I get is that WindowManager does not have an attribute manager.
.py file:
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.properties import NumericProperty, ObjectProperty, ReferenceListProperty
from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.uix.widget import Widget
from kivy.vector import Vector

class Transit(Widget):
    def transit(self, circle):
        if self.collide_widget(circle):
            WindowManager.manager.current = "a1"
        pass

class Wall(Widget):
    def collision(self, circle):
        if circle.collide_widget(self):
            if circle.center_x > (self.pos[0] + self.size[0]) or circle.center_x < self.pos[0]:
                circle.velocity_x = -1 * circle.velocity_x
            elif circle.center_x > self.pos[0] and circle.center_x < (self.pos[0] + self.size[0]):
                circle.velocity_y = -1 * circle.velocity_y

class Circle(Widget):
    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)
    velocity = ReferenceListProperty(velocity_x, velocity_y)

    def move(self):
        self.pos = Vector(*self.velocity) + self.pos

class Move(Widget):
    circle = ObjectProperty(None)
    wall1 = ObjectProperty(None)
    wall2 = ObjectProperty(None)
    wall3 = ObjectProperty(None)
    wall4 = ObjectProperty(None)
    transit1 = ObjectProperty(None)

    def __init__(self, **kwargs):
        super(Move, self).__init__(**kwargs)
        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
        self._keyboard.bind(on_key_down=self._on_keyboard_down)
        Clock.schedule_interval(self.update, 0)

    def update(self, dt):
        self.circle.move()
        self.wall1.collision(self.circle)
        self.wall2.collision(self.circle)
        self.wall3.collision(self.circle)
        self.wall4.collision(self.circle)
        self.transit1.transit(self.circle)

    def _keyboard_closed(self):
        self._keyboard.unbind(on_key_down=self._on_keyboard_down)
        self._keyboard = None

    def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
        if keycode[1] == 'left':
            self.circle.velocity_x -= 0.1
        elif keycode[1] == 'right':
            self.circle.velocity_x += 0.1
        elif keycode[1] == 'up':
            self.circle.velocity_y += 0.1
        elif keycode[1] == 'down':
            self.circle.velocity_y -= 0.1
        return True

class Menu(Screen):
    pass

class Start(Screen):
    pass

class area1(Screen):
    pass

class WindowManager(ScreenManager):
    pass

kv = Builder.load_file("dw.kv")

class Adventure(App):
    def build(self):
        return kv

Adventure().run()
and here's my .kv file:
<Circle>:
    size: 30, 30
    canvas:
        Ellipse:
            pos: self.pos
            size: self.size

WindowManager:
    Menu:
    Start:
    area1:

<Menu>:
    name: "menu"
    # Adding gridlayout
    GridLayout:
        rows: 3
        cols: 1
        AnchorLayout:
            anchor_x: "center"
            anchor_y: "center"
            Label:
                text: "Adventure"
                font_size: 40
        AnchorLayout:
            anchor_x: "center"
            anchor_y: "center"
            TextInput:
                id: ign
                size_hint: (.4, None)
                height: 30
                hint_text: "Enter your name"
                multiline: False
        AnchorLayout:
            anchor_x: "center"
            anchor_y: "center"
            Button:
                text: "Start"
                font_size: 40
                size: 100, 75
                size_hint: (None, None)
                on_release: app.root.current = "start"

<Start>:
    name: "start"
    Move:
        wall1: r1
        wall2: r2
        wall3: r3
        wall4: r4
        transit1: t1
        circle: circle
        Circle:
            id: circle
            pos: root.center_x, root.center_y
        Wall:
            id: r1
            pos: 0, 400
            size: 350, 250
            canvas:
                Rectangle:
                    pos: self.pos
                    size: self.size
        Wall:
            id: r2
            pos: 0, 0
            size: 350, 250
            canvas:
                Rectangle:
                    pos: self.pos
                    size: self.size
        Wall:
            id: r3
            pos: 500, 400
            size: 800, 250
            canvas:
                Rectangle:
                    pos: self.pos
                    size: self.size
        Wall:
            id: r4
            pos: 500, 0
            size: 800, 250
            canvas:
                Rectangle:
                    pos: self.pos
                    size: self.size
        Transit:
            id: t1
            pos: 0, root.center_y
            size: 1, 600

<area1>:
    name: 'a1'
Your only problem is that the line:
WindowManager.manager.current = "a1"
is trying to access the manager attribute of the WindowManager class. You actually want to access that attribute of the WindowManager instance. To do that, you can replace that line with:
App.get_running_app().root.current = "a1"
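For context, a minimal sketch of the Transit class with that change applied (everything else as in the question):

from kivy.app import App
from kivy.uix.widget import Widget

class Transit(Widget):
    def transit(self, circle):
        # Switch screens on the running app's root ScreenManager instance
        # rather than on the WindowManager class itself.
        if self.collide_widget(circle):
            App.get_running_app().root.current = "a1"

App.get_running_app() returns the running Adventure instance, and its root is the WindowManager instance built from the kv file, so setting current on it switches the screen.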
I am trying to use LinearRegressionWithSGD on the Million Song Dataset, and my model returns NaNs as weights and 0.0 as the intercept. What might be the cause of this? I am using Spark 1.4.0 in standalone mode.
Sample data: http://www.filedropper.com/part-00000
Here is my full code:
// Import dependencies
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.mllib.regression.LinearRegressionModel
import org.apache.spark.mllib.regression.GeneralizedLinearAlgorithm
import org.apache.spark.mllib.regression.LinearRegressionWithSGD

// Define RDD
val data = sc.textFile("/home/naveen/Projects/millionSong/YearPredictionMSD.txt")

// Convert to LabeledPoint
def parsePoint(line: String): LabeledPoint = {
  val x = line.split(",")
  val head = x.head.toDouble
  val tail = Vectors.dense(x.tail.map(x => x.toDouble))
  return LabeledPoint(head, tail)
}

// Find range
val parsedDataInit = data.map(x => parsePoint(x))
val onlyLabels = parsedDataInit.map(x => x.label)
val minYear = onlyLabels.min()
val maxYear = onlyLabels.max()

// Shift labels
val parsedData = parsedDataInit.map(x => LabeledPoint(x.label - minYear, x.features))

// Training, validation, and test sets
val splits = parsedData.randomSplit(Array(0.8, 0.1, 0.1), seed = 123)
val parsedTrainData = splits(0).cache()
val parsedValData = splits(1).cache()
val parsedTestData = splits(2).cache()
val nTrain = parsedTrainData.count()
val nVal = parsedValData.count()
val nTest = parsedTestData.count()

// RMSE
def squaredError(label: Double, prediction: Double): Double = {
  return scala.math.pow(label - prediction, 2)
}

def calcRMSE(labelsAndPreds: RDD[List[Double]]): Double = {
  return scala.math.sqrt(labelsAndPreds.map(x => squaredError(x(0), x(1))).mean())
}

val numIterations = 100
val stepSize = 1.0
val regParam = 0.01
val regType = "L2"
val algorithm = new LinearRegressionWithSGD()
algorithm.optimizer
  .setNumIterations(numIterations)
  .setStepSize(stepSize)
  .setRegParam(regParam)
val model = algorithm.run(parsedTrainData)
I am not familiar with this specific implementation of SGD, but generally, if a gradient descent solver produces NaNs, it means that the learning rate is too big (in this case, I think it is the stepSize variable).
Try lowering it by an order of magnitude at a time until the model starts to converge.
I can think of two possibilities:
1. stepSize is too big. You should try something like 0.01, 0.03, 0.1, 0.3, 1.0, 3.0, ...
2. Your training data contains NaNs. If so, the result will likely be NaN.
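As a hedged illustration of that step-size sweep, here is a sketch in PySpark (the Scala optimizer API from the question is analogous); it assumes parsedTrainData and parsedValData are RDDs of LabeledPoint built the same way as above:

import math
from pyspark.mllib.regression import LinearRegressionWithSGD

def rmse(model, data):
    # Root mean squared error of the model over an RDD of LabeledPoint.
    labels_and_preds = data.map(lambda p: (p.label, model.predict(p.features)))
    return math.sqrt(labels_and_preds.map(lambda lp: (lp[0] - lp[1]) ** 2).mean())

# Try step sizes an order of magnitude apart; keep the best by validation RMSE.
best_model, best_err, best_step = None, float("inf"), None
for step in [0.01, 0.03, 0.1, 0.3, 1.0, 3.0]:
    model = LinearRegressionWithSGD.train(
        parsedTrainData, iterations=100, step=step, regParam=0.01, regType="l2")
    err = rmse(model, parsedValData)
    if err < best_err:
        best_model, best_err, best_step = model, err, step

A NaN validation error never compares less than best_err, so step sizes that diverge are skipped automatically.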