Google Colab upload error, how can you fix it? (Cannot read property '_uploadFiles' of undefined)

My problem is that I always get the following error when I run the code below. The strange thing is that when I set the training epochs to 0, the error doesn't show up and I can upload with no problems. Thanks for the help!
I have already tried enabling third-party cookies, which did not help.
Sometimes the error is google.colab._files is undefined instead.
I have already tried both Chrome and Firefox.
import tensorflow as tf
import numpy as np

# load and normalize Fashion-MNIST
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images = training_images.reshape(60000, 28, 28, 1)
training_images = training_images / 255.0
test_images = test_images.reshape(10000, 28, 28, 1)
test_images = test_images / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=1)

classes = model.predict(test_images)
predicted_classes = np.argmax(classes, axis=1)
print(classes[0])
print(test_labels[0])
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
import matplotlib.pyplot as plt
plt.imshow(test_images[0], cmap='Greys_r')
import numpy as np
from google.colab import files
from keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
uploaded = files.upload()
for fn in uploaded.keys():
    path = '/content/' + fn
    img = cv2.imread(path)
    img = cv2.resize(img, (28, 28))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    x = image.img_to_array(img, dtype=np.float32)
    print("top left pixel value:", x[0, 0])
    if x[0, 0] > 250:
        # white background: invert so the digit is bright on dark, like MNIST
        print("needs to be inverted!")
        x -= 255
        x *= -1
    x = x / 255.0
    x = x.reshape(1, 28, 28, 1)
    plt.imshow(img, cmap='Greys_r')
    plt.show()
    classes = model.predict(x)
    plt.bar(range(10), classes[0])
    plt.show()
    print("prediction: class", np.argmax(classes[0]))
TypeError: Cannot read property '_uploadFiles' of undefined

So I found out that it works if you use two cells: one for the neural network and one for the upload feature.
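For reference, here is a minimal sketch of that two-cell layout (the cell split is the point; the model code is abbreviated from the question). My guess, not verified, is that files.upload() talks to the browser through the output area of the cell it runs in, and a long-running training loop in the same cell breaks that connection, so isolating the upload in its own cell avoids the _uploadFiles error.

# --- Cell 1: build and train the model (abbreviated from the question) ---
import tensorflow as tf
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), _ = mnist.load_data()
training_images = training_images.reshape(60000, 28, 28, 1) / 255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=1)

# --- Cell 2: nothing but the upload ---
from google.colab import files
uploaded = files.upload()
print(list(uploaded.keys()))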

Related

Semantic segmentation result is all black, all bits are the same, no mask

When I use the code below to perform semantic segmentation on my own dataset (40 images) and annotations (1 class, my face, in COCO JSON format), I get just a black image, no mask, all bits the same, and the model accuracy in all epochs is 67.87% even though the loss goes down each epoch:
import cv2
import os
import json
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.models import Model
from pycocotools.coco import COCO
from tensorflow.keras.applications import VGG16
from sklearn.utils import compute_sample_weight
folder_path = "D:\\ImageClassification\\face_semantic_segmentation\\dataset"
filenames = os.listdir(folder_path)
images = []
for filename in filenames:
    img = cv2.imread(os.path.join(folder_path, filename))
    img = cv2.resize(img, (256, 256))
    img = np.array(img)
    images.append(img)
x_train = np.array(images)

with open("D:\\ImageClassification\\face_semantic_segmentation\\annotations.json") as f:
    coco = json.load(f)
annotations = coco['annotations']
masks = {}
for annotation in annotations:
    image_id = annotation['image_id']
    if image_id not in masks:
        masks[image_id] = []
    masks[image_id].append(annotation['segmentation'])

resized_masks = []
for image_id, mask in masks.items():
    mask_img = np.zeros((720, 1280), dtype=np.uint8)
    for segmentation in mask:
        poly = np.array(segmentation).reshape((-1, 1, 2)).astype(np.int32)
        cv2.fillPoly(mask_img, [poly], 1)
    mask_img = cv2.resize(mask_img, (256, 256))
    mask_img = np.stack([mask_img] * 3, axis=-1)
    resized_masks.append(mask_img)
y_train = np.array(resized_masks)
y_train.shape
import matplotlib.pyplot as plt
import numpy as np
mask = y_train[4]
mask = np.sum(mask, axis=-1)
plt.imshow(mask)
plt.show()
from segmentation_models import Unet
from segmentation_models import get_preprocessing
from segmentation_models.losses import bce_jaccard_loss
from segmentation_models.metrics import iou_score
from tensorflow.keras.models import Model
BACKBONE = 'resnet50'
preprocess_input = get_preprocessing(BACKBONE)
model = Unet(BACKBONE, classes=2, input_shape=(256, 256, 3), encoder_weights='imagenet', activation='sigmoid')
x_train = preprocess_input(x_train)
x = model.layers[-1].output
x = Conv2D(3, (1, 1), activation='sigmoid')(x)
model = Model(inputs=model.input, outputs=x)
model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['binary_accuracy'])
model.fit(x=x_train, y=y_train, batch_size=32, epochs=50)
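One thing worth checking (an observation, not a verified fix): this is a single-class problem, yet the masks are stacked to 3 channels and an extra Conv2D forces the output to 3 channels, and a binary accuracy frozen at 67.87% usually means the network has collapsed to predicting all background. A minimal sketch of the more conventional single-channel setup, assuming x_train and y_train are built as above:

from segmentation_models import Unet
import numpy as np

# Single-channel binary masks: shape (N, 256, 256, 1), values in {0, 1}
y_train_1ch = y_train[..., :1].astype(np.float32)

# One sigmoid output channel for one foreground class; no extra head needed
model = Unet('resnet50', classes=1, input_shape=(256, 256, 3),
             encoder_weights='imagenet', activation='sigmoid')
model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['binary_accuracy'])
model.fit(x=x_train, y=y_train_1ch, batch_size=8, epochs=50)

# Threshold the prediction; the raw float map can look almost black when plotted
pred = model.predict(x_train[:1])
visible_mask = (pred[0, ..., 0] > 0.5).astype(np.uint8) * 255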

I want to use cv2_imshow in colab

import cv2 as cv
import numpy as np
from google.colab.patches import cv2_imshow
from tensorflow.keras.models import load_model

img_color = cv.imread('test3.jpg', cv.IMREAD_COLOR)
img_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)
ret, img_binary = cv.threshold(img_gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))
img_binary = cv.morphologyEx(img_binary, cv.MORPH_CLOSE, kernel)
cv2_imshow('digit', img_binary)
cv.waitKey(0)
This is part of the process of training a model on handwritten digits and testing the trained model.
I want to display an image using imshow() in Colab.
Is there any way to use it without setting the file path?
You can use matplotlib's imshow function for it:
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.gcf()
fig.set_size_inches(18, 10)
plt.axis("off")
plt.rcParams['figure.figsize'] = [20, 10]
plt.imshow(img_binary)
plt.show()
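A side note on the snippet in the question: unlike cv.imshow, Colab's cv2_imshow takes only the image, with no window name, so cv2_imshow('digit', img_binary) will fail. The call should look like this:

from google.colab.patches import cv2_imshow
cv2_imshow(img_binary)  # one argument: the image; no window title, no waitKey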

ValueError: cannot reshape array of size 178608 into shape (1,224,224,3)

I'm trying to get real-time face recognition with a trained VGG16 model (it has 6 classes). When I run it, I get the above error. This is my code.
from PIL import Image
from tensorflow.keras.applications.vgg16 import preprocess_input
import base64
from io import BytesIO
import json
import random
import cv2
from keras.models import load_model
import numpy as np
from keras_preprocessing import image

model = load_model('FAceRec.h5', compile=False)
face_cas = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def face_extr(img):
    faces = face_cas.detectMultiScale(img, 1.3, 5)
    if faces is ():
        return None
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
        cropped_face = img[y:y + h, x:x + w]
    return cropped_face

## web cam
video_cap = cv2.VideoCapture(0)
while True:
    _, frame = video_cap.read()
    face = face_extr(frame)
    if type(face) is np.ndarray:
        face = cv2.resize(face, (244, 244), 3)
        face = face.reshape(1, 224, 224, 3)
        img = Image.fromarray(face, 'RGB')
        img_array = np.array(img)
        img_array = np.expand_dims(img_array, axis=0)
        pred = model.predict(img_array)
        print(pred)
        name = "No Matching"
        if pred[1][1] > 0.5:
            name = "Suhail"
        cv2.putText(frame, name, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    else:
        cv2.putText(frame, "No Matching Face", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow('Result', frame)
    k = cv2.waitKey(1)
    if k == ord('q'):
        break
video_cap.release()
cv2.destroyAllWindows()
Can someone please help me figure this out? Even when I change the reshape to (244, 244, 3) it shows the same error again and again. Can somebody explain why this is happening and how to solve it?
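For what it's worth, the number in the error message gives the cause away: 244 × 244 × 3 = 178,608, so the cv2.resize to (244, 244) produces an array that cannot be reshaped to (1, 224, 224, 3), which needs 150,528 values. A minimal sketch of the likely fix, assuming the model really expects the standard 224 × 224 VGG16 input (pred[1][1] also looks like it should be pred[0][1], since predict returns shape (1, 6) here):

face = cv2.resize(face, (224, 224))   # 244 was presumably a typo for 224
face = face.reshape(1, 224, 224, 3)   # 224 * 224 * 3 = 150,528, counts now match

pred = model.predict(face)
if pred[0][1] > 0.5:                  # row 0 of the (1, 6) prediction array
    name = "Suhail"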

How to solve "Error when checking input" with incorrect size

This is my first attempt at solving a task on Kaggle. This is the page of the task: https://www.kaggle.com/c/bike-sharing-demand
I wrote this code (I have some excess code lines, because I am not really sure what I need yet):
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import keras
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
train_data = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
train_targets = train_data[['casual', 'registered', 'count']]
train_datetime_helper = train_data[['datetime']]
dt = pd.DatetimeIndex(train_data['datetime'])
train_data['day'] = dt.day
train_data['month'] = dt.month
train_data['year'] = dt.year
train_data['hour'] = dt.hour
train_data['dow'] = dt.dayofweek
train_data['woy'] = dt.weekofyear
train_data = train_data.drop(['casual', 'registered', 'count', 'datetime'], axis=1)
test_data = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
test_datetime_helper = test_data[['datetime']]
dt = pd.DatetimeIndex(test_data['datetime'])
test_data['day'] = dt.day
test_data['month'] = dt.month
test_data['year'] = dt.year
test_data['hour'] = dt.hour
test_data['dow'] = dt.dayofweek
test_data['woy'] = dt.weekofyear
test_data = test_data.drop(['datetime'], axis=1)
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
from keras import models
from keras import layers
from keras.layers import Dense, Conv2D, Flatten
def build_model():
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1], train_targets.shape[1])))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
    print('Processing fold #', i)
    val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
    val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
    partial_train_data = np.concatenate(
        [train_data[:i * num_val_samples],
         train_data[(i + 1) * num_val_samples:]], axis=0)
    partial_train_targets = np.concatenate(
        [train_targets[:i * num_val_samples],
         train_targets[(i + 1) * num_val_samples:]], axis=0)
    model = build_model()
    model.fit(partial_train_data, partial_train_targets,
              epochs=num_epochs, batch_size=1, verbose=0)
    val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
    all_scores.append(val_mae)
I have got this error ("Error when checking input" with an incorrect size). Can you explain how I can solve it?
You specified the input dimension of your model incorrectly: the input shape of a Dense layer should only describe the features of one sample, not the targets. Try defining your first layer this way:
model.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],)))
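One more mismatch worth flagging (my addition, not part of the original answer): the network ends in Dense(1), a single output per sample, while train_targets keeps three columns (casual, registered, count), so the fit would fail on the targets next. A sketch of one way to line them up, predicting only count:

# Keep a single target column so the target shape matches the Dense(1) output
# (taken before those columns are dropped from train_data, as in the question)
train_targets = train_data[['count']]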

Display result of convolution in PyTorch

PyTorch newbie here. I wrote a script (code below) that performs the following operations: load an image, perform a 2D convolution operation and then display the output and the input.
At present I have the image below, which seems off. How can I plot the feature map correctly?
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import matplotlib.pyplot as plt
import imageio
import sys
A = imageio.imread('LiT.png')
# Define how the convolution operation works
conv2 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1)
image_d = torch.FloatTensor(np.asarray(A.reshape(1, 3, A.shape[0] , A.shape[1])))
fc = conv2(image_d)
fc1 = fc.permute(0, 2, 3, 1).reshape([516, 780, 3])
plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(A)
plt.subplot(1,2,2)
plt.imshow(fc1.data.numpy())
plt.show()
The issue with your code is this line:
image_d = torch.FloatTensor(np.asarray(A.reshape(1, 3, A.shape[0], A.shape[1])))
You can't just reshape the image; you need to transpose the channels. As a remark for the future: if you get a stripy result like this, it's most likely caused by a permutation, transposition, or reshape operation that's not correct.
Other than that, I also scaled the convolution output to [0, 1] to show it properly. Below is the working code:
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import matplotlib.pyplot as plt
import imageio
import sys

A = imageio.imread('LiT.png')
# Define how the convolution operation works
conv2 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1)
# from [H, W, C] to [C, H, W]
transposed_image = A.transpose((2, 0, 1))
# add batch dim
transposed_image = np.expand_dims(transposed_image, 0)
image_d = torch.FloatTensor(transposed_image)
fc = conv2(image_d)
fc1 = fc.permute(0, 2, 3, 1)[0]
result = fc1.data.numpy()
# min-max scale the output to [0, 1] for display
max_ = np.max(result)
min_ = np.min(result)
result -= min_
result /= (max_ - min_)
plt.figure(figsize=(16, 8))
plt.subplot(1, 2, 1)
plt.imshow(A)
plt.subplot(1, 2, 2)
plt.imshow(result)
plt.show()
To my understanding, the problem lies in how you are permuting the channel positions in the image by using reshape. Instead, np.transpose or tensor.permute should be used. Using torch for the permutation:
image_d = torch.FloatTensor(np.asarray(A)).unsqueeze(0).permute(0,3,1,2)
Or, if we want to handle the permutation part in numpy:
image_d = np.transpose(np.asarray(A), (2,0,1))
image_d = torch.FloatTensor(image_d).unsqueeze(0)
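To see why reshape and transpose are not interchangeable here, a tiny self-contained check (toy shapes, for illustration only):

import numpy as np

A = np.arange(2 * 3 * 4).reshape(2, 3, 4)  # stand-in for an [H, W, C] image
r = A.reshape(4, 2, 3)                     # reinterprets the flat buffer: channels scrambled
t = A.transpose(2, 0, 1)                   # true [C, H, W] view: t[c, h, w] == A[h, w, c]

print(r[1, 0, 0], t[1, 0, 0])  # 6 vs 1 - different elements end up in each layout
print(np.array_equal(r, t))    # False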
