Kivy: How to Clear Widgets That Were Created Dynamically Without Using clear_widgets()?

Could you please help me clear some widgets? I used clear_widgets(), but it causes problems for me on Android devices.
I have two classes for my labels and buttons:
class Updown(ButtonBehavior, Image):
    def __init__(self, **kwargs):
        super(Updown, self).__init__(**kwargs)

class WrappedLabel(Label):
    def __init__(self, **kwargs):
        super(WrappedLabel, self).__init__(**kwargs)
        self.bind(
            width=lambda *x: self.setter('text_size')(self, (self.width, None)),
            texture_size=lambda *x: self.setter('height')(self, self.texture_size[1]))
Here are my dynamically created widgets:
box = BoxLayout(size_hint_y=None, height=dp(50))
sira = WrappedLabel(text=f'{i+1}', font_name='fonts/Lcd.ttf', font_size=dp(15), size_hint=(0.10, 1), halign='center')
evsahibi = WrappedLabel(text=f'{self.Today[0][i]}', font_name='Roboto', font_size=dp(15), size_hint=(0.225, 1), halign='center', bold=True)
deplasman = WrappedLabel(text=f'{self.Today[1][i]}', font_name='Roboto', font_size=dp(15), size_hint=(0.225, 1), halign='center', bold=True)
macbasigol = WrappedLabel(text=f'{sonuc}', font_name='fonts/Lcd.ttf', font_size=dp(20), size_hint=(0.15, 1), halign='center', color=(1, 0.4, 0.769, 1))
if sonuc == 0:
    ust_2_5 = Updown(size_hint=(0.15, 1), source='images/error.png')
    ust_3_5 = Updown(size_hint=(0.15, 1), source='images/error.png')
elif sonuc < 2.50:
    ust_2_5 = Updown(size_hint=(0.15, 1), source='images/down.png')
    ust_3_5 = Updown(size_hint=(0.15, 1), source='images/down.png')
elif sonuc >= 2.50 and sonuc < 3.50:
    ust_2_5 = Updown(size_hint=(0.15, 1), source='images/up.png')
    ust_3_5 = Updown(size_hint=(0.15, 1), source='images/down.png')
else:
    ust_2_5 = Updown(size_hint=(0.15, 1), source='images/up.png')
    ust_3_5 = Updown(size_hint=(0.15, 1), source='images/up.png')
box.add_widget(sira)
box.add_widget(evsahibi)
box.add_widget(deplasman)
box.add_widget(macbasigol)
box.add_widget(ust_2_5)
box.add_widget(ust_3_5)
# for child in self.ids.gridsonuc.children:
#     print(child)
#     print(child.children)
self.ids.gridsonuc.add_widget(box)
# for child in self.ids.gridsonuc.children:
#     print(child)
Results:
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B365A6120>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B3621C6D0>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B365A6120>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B361E6CF0>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B3621C6D0>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B365A6120>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B361932E0>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B361E6CF0>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B3621C6D0>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B365A6120>
<kivy.uix.boxlayout.BoxLayout object at 0x0000008B361CE890>
703 lines!
# for child in self.ids.gridsonuc.children:
#     print(child.children)
Results:
[<__main__.Updown object at 0x00000088122D5430>, <__main__.Updown object at 0x00000088122E3120>, <__main__.WrappedLabel object at 0x00000088122C6350>, <__main__.WrappedLabel object at 0x00000088122C8580>, <__main__.WrappedLabel object at 0x00000088122E67B0>, <__main__.WrappedLabel object at 0x00000088122B59E0>]
[<__main__.Updown object at 0x00000088123869E0>, <__main__.Updown object at 0x000000881238A740>, <__main__.WrappedLabel object at 0x0000008812395970>, <__main__.WrappedLabel object at 0x000000881239EBA0>, <__main__.WrappedLabel object at 0x00000088123A6DD0>, <__main__.WrappedLabel object at 0x00000088123AC040>]
... (more lists of WrappedLabel and Updown objects)
What did I try that did not work properly for me?
for child in self.ids.gridsonuc.children:
    self.ids.gridsonuc.remove_widget(self.ids.gridsonuc.children[0])
I tried the code above, but it does not remove all widgets properly. It deletes <kivy.uix.boxlayout.BoxLayout object at 0x0000008B361CE890> and similar widgets, but only 84 lines out of 703, with 19 left.
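(For reference, the loop above removes items from self.ids.gridsonuc.children while iterating over that same list, so entries get skipped; a minimal sketch that iterates over a copy of the list instead:)

# Take a snapshot of the children list so remove_widget()
# does not skip entries while the loop is running.
for child in list(self.ids.gridsonuc.children):
    self.ids.gridsonuc.remove_widget(child)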
I also tried
self.ids.gridsonuc.remove_widget(box)
in another function. I assigned every box to self.box, but that did not work for me either.
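(A sketch of that reference-keeping approach, assuming a self.boxes list is created when the screen is set up; the attribute name is illustrative:)

self.boxes = []  # created once, e.g. in __init__

# where each row is built:
self.boxes.append(box)
self.ids.gridsonuc.add_widget(box)

# in the other function that should clear the rows:
for box in self.boxes:
    self.ids.gridsonuc.remove_widget(box)
self.boxes = []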
Could you please help me remove them all without using the clear_widgets() command?
Thanks very much

Related

Problem with a custom collate_fn in DataLoader

I am trying to do a graph classification task. To do so, I needed to write my own collate function to use with DataLoader in PyTorch.
train_loader =loader.DataLoader(train_dataset, batch_size=2, shuffle=False,collate_fn=collate_mydataset)
"collate_mydataset" function seems to work well when is called directly when I pass two different samples (with different number of nodes/edges/fearures/etc ) in a list to it. it pads correctly everything. but when I set batch_size>1 ( or to make it similar to the case of calling it directly , lets say batch_size=2 ) it doesn't work well and says stack expects each tensor to be equal size, but got [32, 9] at entry 0 and [15, 9] at entry 1 ( what I understand it says node_features are not padded correctly. here is also my collate function:
def collate_mydataset(samples):
    num_nodes_list = [data[5] for data in samples]
    max_num_nodes = max(num_nodes_list)
    num_edges_list = [data[2].size(0) for data in samples]
    max_num_edges = max(num_edges_list)
    features_list = [data[0] for data in samples]  # node features
    edge_indices_list = [data[1] for data in samples]
    edge_features_list = [data[2] for data in samples]
    graph_labels_list = [data[3] for data in samples]
    m_list = [data[4] for data in samples]

    features_padded = []
    for feature in features_list:
        num_nodes = feature.shape[0]
        if num_nodes < max_num_nodes:
            padding = torch.zeros((max_num_nodes - num_nodes, feature.shape[1]))
            features_padded.append(torch.cat([feature, padding], 0))
        else:
            features_padded.append(feature)
    features = torch.stack(features_padded, dim=0)

    edge_indices_padded = []
    for edge_indices in edge_indices_list:
        num_edges = edge_indices.shape[1]
        if num_edges < max_num_edges:
            padding = torch.zeros((2, max_num_edges - num_edges))
            edge_indices_padded.append(torch.cat([edge_indices, padding], 1))
        else:
            edge_indices_padded.append(edge_indices)
    edge_indices = torch.stack(edge_indices_padded, dim=1)

    edge_features_padded = []
    for feature in features_list:
        num_edges = feature.shape[0]
        if num_edges < max_num_edges:
            padding = torch.zeros((max_num_edges - num_edges, feature.shape[1]))
            edge_features_padded.append(torch.cat([feature, padding], 0))
        else:
            edge_features_padded.append(feature)
    edge_features = torch.stack(edge_features_padded, dim=0)

    graph_labels = torch.stack(graph_labels_list, dim=0)

    m_padded = []
    for m in m_list:
        num_nodes = m.shape[0]
        if num_nodes < max_num_nodes:
            padding = torch.zeros((max_num_nodes - num_nodes, m.shape[1]))
            m_padded.append(torch.cat([m, padding], 0))
        else:
            m_padded.append(m)
    m = torch.stack(m_padded, dim=0)

    return features, edge_indices, edge_features, graph_labels, m
What could be the issue?
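(A small debugging sketch, reusing train_dataset and collate_mydataset from the question: feed the collate function exactly the two samples the first batch would contain and print the shapes involved, to see which torch.stack() call receives mismatched tensors.)

# Reproduce the first batch by hand and inspect the raw shapes.
first_two = [train_dataset[0], train_dataset[1]]
for i, sample in enumerate(first_two):
    print(i, [t.shape if hasattr(t, 'shape') else t for t in sample])
batch = collate_mydataset(first_two)
print([t.shape for t in batch])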

UnidentifiedImageError: cannot identify image file <_io.BytesIO object at 0x7fd42c66d3b0>

I am working on an object detection problem with the aquarium dataset from Roboflow. I have been trying to create bounding boxes for the fish, but I am getting the error:
UnidentifiedImageError: cannot identify image file <_io.BytesIO object at 0x7fd42c66d3b0>
I also tried to see which images are corrupted and ran this code:
import PIL
import PIL.Image
from pathlib import Path
from PIL import UnidentifiedImageError

count = 0
path = Path("/content/drive/MyDrive/archive/Aquarium Combined").rglob("*.jpg")
for img_p in path:
    try:
        img = PIL.Image.open(img_p)
    except PIL.UnidentifiedImageError:
        print(img_p)
        count += 1
print(count)
It gives me a count of 651 images, but my dataset has 662 images. I guess PIL doesn't know how to decode them, or I don't know what the problem is. Here is a sample image file name:
/content/drive/MyDrive/archive/Aquarium Combined/test/IMG_2301_jpeg_jpg.rf.2c19ae5efbd1f8611b5578125f001695.jpg
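(As a side note, PIL.Image.open alone is lazy and may not flag truncated files; a small sketch, using the same path as above, that also calls verify() and catches OSError:)

from pathlib import Path
from PIL import Image, UnidentifiedImageError

bad = []
for img_p in Path("/content/drive/MyDrive/archive/Aquarium Combined").rglob("*.jpg"):
    try:
        with Image.open(img_p) as img:
            img.verify()  # forces PIL to parse the file, catching truncated/corrupt data
    except (UnidentifiedImageError, OSError) as e:
        bad.append((img_p, e))
print(len(bad))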
Full traceback:
UnidentifiedImageError                    Traceback (most recent call last)
<ipython-input-31-2785d562a97e> in <module>()
      4     sample[1]['boxes'][:, [1, 0, 3, 2]],
      5     [classes[i] for i in sample[1]['labels']],
----> 6     width=4).permute(1, 2, 0)
      7 )

3 frames

/usr/local/lib/python3.7/dist-packages/PIL/Image.py in open(fp, mode)
   2894     if mode == "P":
   2895         from . import ImagePalette
-> 2896
   2897         im.palette = ImagePalette.ImagePalette("RGB", im.im.getpalette("RGB"))
   2898         im.readonly = 1

UnidentifiedImageError: cannot identify image file <_io.BytesIO object at 0x7fd42c66d3b0>
Also, I am providing the class definition:
class AquariumDetection(datasets.VisionDataset):
    def __init__(
        self,
        root: str,
        split="train",
        transform=None,
        target_transform=None,
        transforms=None,
    ) -> None:
        super().__init__(root, transforms, transform, target_transform)
        self.split = split
        self.coco = COCO(os.path.join(root, split, "_annotations.coco.json"))
        self.ids = list(sorted(self.coco.imgs.keys()))
        self.ids = [id for id in self.ids if (len(self._load_target(id)) > 0)]

    def _load_image(self, id: int) -> Image.Image:
        path = self.coco.loadImgs(id)[0]["file_name"]
        image = cv2.imread(os.path.join(self.root, self.split, path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def _load_target(self, id: int):
        return self.coco.loadAnns(self.coco.getAnnIds(id))

    def __getitem__(self, index: int):
        id = self.ids[index]
        image = self._load_image(id)
        target = copy.deepcopy(self._load_target(id))
        boxes = [t['bbox'] + [t['category_id']] for t in target]
        if self.transforms is not None:
            transformed = self.transforms(image=image, bboxes=boxes)
            image = transformed['image']
            boxes = transformed['bboxes']
        new_boxes = []
        for box in boxes:
            xmin = box[0]
            ymin = box[1]
            xmax = xmin + box[2]
            ymax = ymin + box[3]
            new_boxes.append([ymin, xmin, ymax, xmax])
        boxes = torch.tensor(new_boxes, dtype=torch.float32)
        _, h, w = image.shape
        targ = {}
        targ["boxes"] = boxes
        targ["labels"] = torch.tensor([t["category_id"] for t in target], dtype=torch.int64)
        targ["image_id"] = torch.tensor([t["image_id"] for t in target])
        targ["area"] = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        targ["iscrowd"] = torch.tensor([t["iscrowd"] for t in target], dtype=torch.int64)
        targ["img_scale"] = torch.tensor([1.0])
        targ['img_size'] = (h, w)
        image = image.div(255)
        normalize = T.Compose([T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        return normalize(image), targ, index

    def __len__(self) -> int:
        return len(self.ids)

'Not callable' error when calculating integrated gradient interpretability with captum

I'm trying to perform model interpretability with Captum but am running into an error. Specifically, it says:
/usr/lib/python3.7/inspect.py in _signature_from_callable(obj, follow_wrapper_chains, skip_bound_arg, sigcls)
   2206
   2207     if not callable(obj):
-> 2208         raise TypeError('{!r} is not a callable object'.format(obj))
   2209
   2210     if isinstance(obj, types.MethodType):
I’m not certain how to resolve this. Here’s the definition of my model, for reference:
class dvib(nn.Module):
    def __init__(self, k, out_channels, hidden_size):
        super(dvib, self).__init__()
        self.conv = torch.nn.Conv2d(in_channels=1,
                                    out_channels=out_channels,
                                    kernel_size=(1, 20),
                                    stride=(1, 1),
                                    padding=(0, 0),
                                    )
        self.rnn = torch.nn.GRU(input_size=out_channels,
                                hidden_size=hidden_size,
                                num_layers=2,
                                bidirectional=True,
                                batch_first=True,
                                dropout=0.2
                                )
        self.fc1 = nn.Linear(hidden_size * 4, hidden_size * 4)
        self.enc_mean = nn.Linear(hidden_size * 4 + 578, k)
        self.enc_std = nn.Linear(hidden_size * 4 + 578, k)
        self.dec = nn.Linear(k, 2)
        nn.init.xavier_uniform_(self.fc1.weight)
        nn.init.constant_(self.fc1.bias, 0.0)
        nn.init.xavier_uniform_(self.enc_mean.weight)
        nn.init.constant_(self.enc_mean.bias, 0.0)
        nn.init.xavier_uniform_(self.enc_std.weight)
        nn.init.constant_(self.enc_std.bias, 0.0)
        nn.init.xavier_uniform_(self.dec.weight)
        nn.init.constant_(self.dec.bias, 0.0)

    def cnn_gru(self, x, lens):
        print(x.shape)
        x = x.unsqueeze(1)
        print('after first unsqueeze: ', x.shape)
        x = self.conv(x)
        print('after conv: ', x.shape)
        x = torch.nn.ReLU()(x)
        print('shape after relu: ', x.shape, type(x))
        x = x.squeeze(3)
        print('shape after squeeze: ', x.shape)
        x = x.view(x.size(0), -1)
        x = x.permute(0, 2, 1)
        print('shape after permute: ', x.shape)
        print(type(lens))
        gru_input = pack_padded_sequence(x, lens, batch_first=True, enforce_sorted=False)
        output, hidden = self.rnn(gru_input)
        print('hidden layer: ', hidden.shape)
        output_all = torch.cat([hidden[-1], hidden[-2], hidden[-3], hidden[-4]], dim=1)
        print("output_all.shape:", output_all.shape)
        return output_all

    def forward(self, pssm, lengths, FEGS):
        cnn_vectors = self.cnn_gru(pssm, lengths)
        feature_vec = torch.cat([cnn_vectors, FEGS], dim=1)
        enc_mean, enc_std = self.enc_mean(feature_vec), f.softplus(self.enc_std(feature_vec) - 5)
        eps = torch.randn_like(enc_std)
        latent = enc_mean + enc_std * eps
        outputs = f.sigmoid(self.dec(latent))
        print(outputs.shape)
        return outputs, enc_mean, enc_std, latent
I load pretrained weights into the model as well, prior to passing it to captum with the relevant arguments:
ig = IntegratedGradients(model(test_pssm_small, test_len_small, test_FEGS_small))
attr = ig.attribute(test_FEGS_small, n_steps=5)
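(For reference, Captum's IntegratedGradients wraps a callable, i.e. the model itself or a function around its forward pass, rather than the tensors produced by calling the model. A rough sketch under that assumption, reusing the tensors from the question; the wrapper function and the target index are illustrative, not taken from the post.)

from captum.attr import IntegratedGradients

# Wrap the model (a callable), not the result of calling it.
# Because the model takes several inputs and returns a tuple, a small
# wrapper that returns a single tensor keeps the example simple.
def forward_fn(FEGS, pssm, lens):
    outputs, enc_mean, enc_std, latent = model(pssm, lens, FEGS)
    return outputs

ig = IntegratedGradients(forward_fn)
attr = ig.attribute(test_FEGS_small,
                    additional_forward_args=(test_pssm_small, test_len_small),
                    target=0,  # illustrative: index of the output class to attribute
                    n_steps=5)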

dropout(): argument 'input' (position 1) must be Tensor, not tuple when using XLNet with HuggingFace

I get an error saying that the input should be of type Tensor, not tuple. I do not know how to work around this problem, as I am already using return_dict=False as described in the migration plan.
My model is as follows:
class XLNetClassifier(torch.nn.Module):
    def __init__(self, dropout_rate=0.1):
        super(XLNetClassifier, self).__init__()
        self.XLNet = XLNetModel.from_pretrained('xlnet-base-cased', return_dict=False)
        self.d1 = torch.nn.Dropout(dropout_rate)
        self.l1 = torch.nn.Linear(768, 64)
        self.bn1 = torch.nn.LayerNorm(64)
        self.d2 = torch.nn.Dropout(dropout_rate)
        self.l2 = torch.nn.Linear(64, 3)

    def forward(self, input_ids, attention_mask):
        x = self.XLNet(input_ids=input_ids, attention_masks=attention_mask)
        x = self.d1(x)
        x = self.l1(x)
        x = self.bn1(x)
        x = torch.nn.Tanh()(x)
        x = self.d2(x)
        x = self.l2(x)
        return x
The error occurs when calling the dropout.
The XLNetModel returns two output values:
last_hidden_state
mems
That means you get a tuple and not a single tensor, which is what the error message says. Your class definition should therefore be:
from transformers import XLNetModel, XLNetTokenizerFast
import torch

class XLNetClassifier(torch.nn.Module):
    def __init__(self, dropout_rate=0.1):
        super(XLNetClassifier, self).__init__()
        self.XLNet = XLNetModel.from_pretrained('xlnet-base-cased', return_dict=False)
        self.d1 = torch.nn.Dropout(dropout_rate)
        self.l1 = torch.nn.Linear(768, 64)
        self.bn1 = torch.nn.LayerNorm(64)
        self.d2 = torch.nn.Dropout(dropout_rate)
        self.l2 = torch.nn.Linear(64, 3)

    def forward(self, input_ids, attention_mask):
        x = self.XLNet(input_ids=input_ids, attention_masks=attention_mask)
        x = self.d1(x[0])
        x = self.l1(x)
        x = self.bn1(x)
        x = torch.nn.Tanh()(x)
        x = self.d2(x)
        x = self.l2(x)
        return x

tokenizer = XLNetTokenizerFast.from_pretrained('xlnet-base-cased')
model = XLNetClassifier()

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt", return_token_type_ids=False)
outputs = model(**inputs)
Or, even better, without return_dict=False:
class XLNetClassifier(torch.nn.Module):
    def __init__(self, dropout_rate=0.1):
        super(XLNetClassifier, self).__init__()
        self.XLNet = XLNetModel.from_pretrained('xlnet-base-cased')
        self.d1 = torch.nn.Dropout(dropout_rate)
        self.l1 = torch.nn.Linear(768, 64)
        self.bn1 = torch.nn.LayerNorm(64)
        self.d2 = torch.nn.Dropout(dropout_rate)
        self.l2 = torch.nn.Linear(64, 3)

    def forward(self, input_ids, attention_mask):
        x = self.XLNet(input_ids=input_ids, attention_masks=attention_mask)
        x = self.d1(x.last_hidden_state)
        x = self.l1(x)
        x = self.bn1(x)
        x = torch.nn.Tanh()(x)
        x = self.d2(x)
        x = self.l2(x)
        return x

"All input arrays and target arrays must have the same number of samples" - training on a single image to check if the model works in Keras

def obcandidate(inputvgg, outputmodel):
    graph = Graph()
    graph.add_input(name='input1', input_shape=(512, 14, 14))
    graph.add_node(Convolution2D(512, 1, 1), name='conv11', input='input1')
    graph.add_node(Convolution2D(512, 14, 14), name='conv112', input='conv11')
    graph.add_node(Flatten(), name='flatten11', input='conv112')
    graph.add_node(Dense(3136), name='dense1', input='flatten11')
    graph.add_node((Activation('relu')), name='relu', input='dense1')
    graph.add_node(Reshape((56, 56)), name='reshape', input='relu')
    sgd = SGD(lr=0.001, decay=.00005, momentum=0.9, nesterov=True)
    graph.add_output(name='output1', input='reshape')
    graph.compile(optimizer=sgd, loss={'output1': 'binary_crossentropy'})
    print 'compile success'
    history = graph.fit({'input1': inputvgg, 'output1': outputmodel}, nb_epoch=1)
    predictions = graph.predict({'input1': inputvgg})
    return graph

"""main function"""

if __name__ == "__main__":
    model = VGG_16('vgg16_weights.h5')
    sgdvgg = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgdvgg, loss='categorical_crossentropy')
    finaloutputmodel = outputofconvlayer(model)
    finaloutputmodel.compile(optimizer=sgdvgg, loss='categorical_crossentropy')
    img = cv2.resize(cv2.imread('000032.jpg'), (224, 224))
    mean_pixel = [103.939, 116.779, 123.68]
    img = img.astype(np.float32, copy=False)
    for c in range(3):
        img[:, :, c] = img[:, :, c] - mean_pixel[c]
    img = img.transpose((2, 0, 1))
    img = np.expand_dims(img, axis=0)
    imgout = np.asarray(cv2.resize(cv2.imread('000032seg.png', 0), (56, 56)))
    imgout[imgout != 0] = 1
    out = imgout
    inputvgg = np.asarray(finaloutputmodel.predict(img))
    obcandidate(inputvgg, out)
Hi, above is my code where I am trying to segment object candidates through a Graph model.
I want to check whether the code works for a single input, so I am giving it one input image and the corresponding output image.
But Keras gives me the error "All input arrays and target arrays must have the same number of samples."
Can anyone tell me what I should do to see if my model runs? I am training on one input so that I can verify the model is correct before starting full training. Is there any other way to do this?
In the part where you do history = graph.fit({'input1': inputvgg, 'output1': outputmodel}, nb_epoch=1), inputvgg and outputmodel should have the same number of samples, i.e. the same size in the first (batch) dimension.
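(A minimal sketch of what that means for the code above, assuming inputvgg comes out of predict() with a leading batch dimension of 1; not tested against this exact setup.)

import numpy as np

# inputvgg from finaloutputmodel.predict(img) should already have a leading batch
# dimension of 1; give the segmentation target the same leading dimension so both
# contain exactly one sample.
out = np.expand_dims(imgout, axis=0)   # (56, 56) -> (1, 56, 56)
obcandidate(inputvgg, out)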
