How to solve "ValueError: Size is invalid. Valid font size are xx-small, x-small, small, medium, large, x-large, xx-large, larger, smaller, None"? - histogram

I used
import matplotlib as mpl
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
t = ax.text(0.5, 0.5, 'Text')
fonts = ['xx-small', 'x-small', 'small', 'medium', 'large',
         'x-large', 'xx-large', 'larger', 'smaller']
for font in fonts:
    t.set_fontsize(font)
    print(font, round(t.get_fontsize(), 2))
plt.close()
The output was
xx-small 5.79
x-small 6.94
small 8.33
medium 10.0
large 12.0
x-large 14.4
xx-large 17.28
larger 12.0
smaller 8.33
Then I ran my histogram code with the following and got the error.
import matplotlib.pyplot as plt
# Put your code here to create the plot
# %matplotlib inline
fig = plt.figure(figsize = (10, 5))
ax = fig.gca()
Plot_file['tripduration'].hist(ax = ax, alpha = 0.75, xlabelsize='Seconds', ylabelsize= 'Number of rides', bins=50)
Can anyone help me solve this error?
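A possible explanation (an assumption, not confirmed by the poster): the hist parameters xlabelsize and ylabelsize expect font sizes, not axis-label text, so passing 'Seconds' and 'Number of rides' triggers the font-size ValueError. A minimal sketch of the assumed intent, using numeric tick-label sizes and setting the label text on the Axes instead:

import matplotlib.pyplot as plt

fig = plt.figure(figsize=(10, 5))
ax = fig.gca()
# xlabelsize/ylabelsize control tick-label font sizes; use numbers or named sizes here
Plot_file['tripduration'].hist(ax=ax, alpha=0.75, bins=50, xlabelsize=10, ylabelsize=10)
# the text labels go on the Axes, not into hist()
ax.set_xlabel('Seconds')
ax.set_ylabel('Number of rides')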

Related

Why isn't RandomCrop inserting the padding in pytorch?

I am finding that RandomCrop isn't adding the padding when I create my images. Why is that?
Reproducible script 1
todo with cifar...
Reproducible script 2:
code:
def check_size_of_mini_imagenet_original_img():
    # - seed everything for determinism
    import random
    import numpy as np
    import torch
    import os
    seed = 0
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)
    random.seed(seed)

    import learn2learn
    batch_size = 5
    kwargs: dict = dict(name='mini-imagenet', train_ways=2, train_samples=2, test_ways=2, test_samples=2)
    kwargs['data_augmentation'] = 'lee2019'
    benchmark: learn2learn.BenchmarkTasksets = learn2learn.vision.benchmarks.get_tasksets(**kwargs)
    splits = ['train', 'validation', 'test']  # assumed; not defined in the original snippet
    tasksets = [(split, getattr(benchmark, split)) for split in splits]
    for i, (split, taskset) in enumerate(tasksets):
        print(f'{taskset=}')
        print(f'{taskset.dataset.dataset.transform=}')
        for task_num in range(batch_size):
            X, y = taskset.sample()
            print(f'{X.size()=}')
            assert X.size(2) == 84
            print(f'{y.size()=}')
            print(f'{y=}')
            for img_idx in range(X.size(0)):
                visualize_pytorch_tensor_img(X[img_idx], show_img_now=True)
                if img_idx >= 5:  # print 5 images only
                    break
            # visualize_pytorch_batch_of_imgs(X, show_img_now=True)
            print()
            if task_num >= 4:  # so to get a MI image finally (note omniglot does not have padding at train...oops!)
                break
            break
        break
and
def visualize_pytorch_tensor_img(tensor_image: torch.Tensor, show_img_now: bool = False):
    """
    Due to channel orders not agreeing in PyTorch and matplotlib.
    Given a Tensor representing the image, use .permute() to put the channels as the last dimension:
    ref: https://stackoverflow.com/questions/53623472/how-do-i-display-a-single-image-in-pytorch
    """
    from matplotlib import pyplot as plt
    assert len(tensor_image.size()) == 3, f'Err your tensor is the wrong shape {tensor_image.size()=} ' \
                                          f'likely it should have been a single tensor with 3 channels ' \
                                          f'i.e. CHW.'
    if tensor_image.size(0) == 3:  # three channels
        plt.imshow(tensor_image.permute(1, 2, 0))
    else:
        plt.imshow(tensor_image)
    if show_img_now:
        plt.tight_layout()
        plt.show()
images here: https://github.com/learnables/learn2learn/issues/376#issuecomment-1319368831
first one:
I am getting odd images despite printing the transform the data is using:
-- splits[i]='train'
taskset=<learn2learn.data.task_dataset.TaskDataset object at 0x7fbc38345880>
taskset.dataset.dataset.datasets[0].dataset.transform=Compose(
    ToPILImage()
    RandomCrop(size=(84, 84), padding=8)
    ColorJitter(brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=None)
    RandomHorizontalFlip(p=0.5)
    ToTensor()
    Normalize(mean=[0.47214064400000005, 0.45330829125490196, 0.4099612805098039], std=[0.2771838538039216, 0.26775040952941176, 0.28449041290196075])
)
But the padding is missing.
When I use this instead:
train_data_transform = Compose([
    RandomResizedCrop((size - padding*2, size - padding*2), scale=scale, ratio=ratio),
    Pad(padding=padding),
    ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
    RandomHorizontalFlip(),
    ToTensor(),
    Normalize(mean=mean, std=std),
])
it seems to work:
Why don't both show the 8-pixel padding on each side that I expect?
I also tried viewing the mini-imagenet images from torchmeta, and the padding didn't seem to be there either:
task_num=0
Compose(
    RandomCrop(size=(84, 84), padding=8)
    RandomHorizontalFlip(p=0.5)
    ColorJitter(brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[-0.2, 0.2])
    ToTensor()
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
)
X.size()=torch.Size([25, 3, 84, 84])
The code is much harder to make compact and reproducible, but you can see torchmeta_plot_images_is_the_padding_there in my ultimate-utils library.
For now, since two data sets show the padding not being inserted despite the transform saying it should be, I am concluding there is a bug in PyTorch, or in my PyTorch version, or I just don't understand RandomCrop. But the description seems clear to me:
padding (int or sequence, optional) –
Optional padding on each border of the image. Default is None. If a single int is provided this is used to pad all borders.
and the normal padding Pad(...) says something very similar:
padding (int or sequence) –
Padding on each border. If a single int is provided this is used to pad all borders.
So what else could be going wrong? The bottom image I provided with padding was produced with the Pad() transform above, not with RandomCrop.
Cross-posted:
GitHub issue: https://github.com/learnables/learn2learn/issues/376
PyTorch forum: https://discuss.pytorch.org/t/why-isnt-randomcrop-inserting-the-padding-in-pytorch/166244
They are padded (by 8 on each border) and then cropped back to 84: you can see the black padding on each image (e.g., on the left of the 2nd image).
I discovered and confirmed that by trying it on CIFAR. But note this is NOT what the docs say for RandomCrop:
Optional padding on each border of the image. Default is None. If a single int is provided this is used to pad all borders.
It says something very similar to Pad:
Padding on each border. If a single int is provided this is used to pad all borders.
See: https://github.com/learnables/learn2learn/issues/376#issuecomment-1319405466
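For reference, a rough sketch of the behavior as I understand it: RandomCrop with a padding argument pads first and then takes a random crop of the requested size, so it behaves (up to the random offset) like an explicit Pad followed by a plain RandomCrop.

from torchvision.transforms import Compose, Pad, RandomCrop

# RandomCrop(84, padding=8) is roughly equivalent to:
equivalent = Compose([Pad(8), RandomCrop(84)])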
I am going to report this to PyTorch as a bug: https://github.com/pytorch/pytorch/issues/89253. Reproducible code on CIFAR:
def check_padding_random_crop_cifar_pure_torch():
    # -
    import sys
    print(f'python version: {sys.version=}')
    import torch
    print(f'{torch.__version__=}')
    # -
    from uutils.plot.image_visualization import visualize_pytorch_tensor_img
    from torchvision.transforms import RandomCrop

    # - for determinism
    import random
    random.seed(0)
    import torch
    torch.manual_seed(0)
    import numpy as np
    np.random.seed(0)

    # -
    from pathlib import Path
    root = Path('~/data/').expanduser()
    import torch
    import torchvision

    # - test tensor imgs
    from torchvision.transforms import Resize
    from torchvision.transforms import Pad
    from torchvision.transforms import ToTensor
    from torchvision.transforms import Compose

    # -- see if pad doubles length
    print(f'--- test padding doubles length with Pad(...)')
    transform = Compose([Resize((32, 32)), Pad(padding=4), ToTensor()])
    train = torchvision.datasets.CIFAR100(root=root, train=True, download=True,
                                          transform=transform,
                                          target_transform=lambda data: torch.tensor(data, dtype=torch.long))
    transform = Compose([Resize((32, 32)), Pad(padding=8), ToTensor()])
    test = torchvision.datasets.CIFAR100(root=root, train=True, download=True,
                                         transform=transform,
                                         target_transform=lambda data: torch.tensor(data, dtype=torch.long))

    # - test padding doubles length
    from torch.utils.data import DataLoader
    loader = DataLoader(train)
    x, y = next(iter(loader))
    print(f'{x[0].size()=}')
    assert x[0].size(2) == 32 + 4 * 2
    assert x[0].size(2) == 32 + 8
    visualize_pytorch_tensor_img(x[0], show_img_now=True)
    #
    loader = DataLoader(test)
    x, y = next(iter(loader))
    print(f'{x[0].size()=}')
    assert x.size(2) == 32 + 8 * 2
    assert x.size(2) == 32 + 16
    visualize_pytorch_tensor_img(x[0], show_img_now=True)

    # -- see if RandomCrop also puts the pad
    print(f'--- test RandomCrop indeed puts padding')
    transform = Compose([Resize((32, 32)), RandomCrop(28, padding=8), ToTensor()])
    train = torchvision.datasets.CIFAR100(root=root, train=True, download=True,
                                          transform=transform,
                                          target_transform=lambda data: torch.tensor(data, dtype=torch.long))
    transform = Compose([Resize((32, 32)), RandomCrop(28), ToTensor()])
    test = torchvision.datasets.CIFAR100(root=root, train=True, download=True,
                                         transform=transform,
                                         target_transform=lambda data: torch.tensor(data, dtype=torch.long))

    # - test that the padding is there visually
    from torch.utils.data import DataLoader
    loader = DataLoader(train)
    x, y = next(iter(loader))
    print(f'{x[0].size()=}')
    assert x[0].size(2) == 28
    visualize_pytorch_tensor_img(x[0], show_img_now=True)
    #
    loader = DataLoader(test)
    x, y = next(iter(loader))
    print(f'{x[0].size()=}')
    assert x.size(2) == 28
    visualize_pytorch_tensor_img(x[0], show_img_now=True)

Richardson-Lucy not sharpening image

I had posted a question previously about the Richardson-Lucy algorithm. I have a follow-up question I would appreciate help with.
Below is the Python code I am using. My input image is already blurry, so I removed the lines I originally had to intentionally blur the image. I am getting the error:
RuntimeWarning: invalid value encountered in true_divide
relative_blur = image / convolve(im_deconv, psf, mode='same')
I would appreciate help debugging this. I kept the lines in the program that I commented out based on the suggestion below.
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter
from scipy.signal import convolve2d as conv2
from skimage import color, data, restoration

Image.open('TOFA-003_UV_Cured_Lincoln_Corrected.bmp').convert('L').save('TOFA-003_UV_Cured_Lincoln_Corrected_gray.bmp')
astro = Image.open('TOFA-003_UV_Cured_Lincoln_Corrected_gray.bmp')
psf = np.ones((5, 5)) / 25
#psf = np.ones((8, 8)) / 25
astro = conv2(astro, psf, 'same')
astro = astro/255

# Add Noise to Image
#astro_noisy = astro.copy()
#astro_noisy += (np.random.poisson(lam=25, size=astro.shape) - 10) / 255
#astro_noisy = astro_noisy/255

# Restore Image using Richardson-Lucy algorithm
deconvolved_RL = restoration.richardson_lucy(astro, psf, iterations=2)

fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(8, 5))
plt.gray()
for a in (ax[0], ax[1], ax[2]):
    a.axis('off')
ax[0].imshow(astro)
ax[0].set_title('Original Data')
#ax[1].imshow(astro_noisy)
#ax[1].set_title('Noisy data')
ax[2].imshow(deconvolved_RL, vmin=astro.min(), vmax=astro.max())
ax[2].set_title('Restoration using\nRichardson-Lucy')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
                    top=0.9, bottom=0.05, left=0, right=1)
plt.show()
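One possible cause (an assumption, not confirmed from the warning alone): richardson_lucy divides by the convolved estimate, so exact zeros in the input can produce 0/0 = NaN. A minimal sketch of two guards, applied after the image has been converted to a float array and before deconvolution:

astro = np.asarray(astro, dtype=float)
astro = astro / astro.max()          # keep values in [0, 1], which clip=True assumes
astro = np.clip(astro, 1e-7, None)   # avoid exact zeros that can lead to 0/0 in the division

deconvolved_RL = restoration.richardson_lucy(astro, psf, iterations=2)

Newer scikit-image versions also expose a filter_epsilon argument on richardson_lucy that guards this division internally.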

[OpenCV] How to fix contours to rectangle?

Note
I'm new to OpenCV (and computer vision), so it would be very helpful just to tell me the search query!
What I want to ask
I want to write a program that extracts business cards from pictures.
I was able to extract a rough outline, but reflected light becomes noise and I can't extract an accurate outline. Please tell me your ideas.
Image (raw data): [image]
Output (rough outline): [image]
code
import math
import itertools
from glob import glob
import cv2
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline

def read_images():
    "read image data from data directory"
    names = glob('data/*.jpg')
    names.sort()
    return map(lambda name: cv2.imread(name), names)

def blur(img):
    "apply blur"
    return cv2.GaussianBlur(img, (25, 25), 0)

def show_images(images, column, color_type=cv2.COLOR_BGR2RGB):
    "plot images with matplotlib"
    plt.figure(figsize=(10, 10), dpi=150)
    for n, img in zip(range(len(images)), images):
        p = plt.subplot(math.ceil(len(images) / column), column, n + 1)
        p.axis('off')
        if color_type is None:
            p.imshow(img)
        else:
            p.imshow(cv2.cvtColor(img, color_type))
    plt.show()

def detect_background_color(img):
    "detect background color"
    # Assume that the perimeter is all background
    height, width, *_ = img.shape
    background_colors = np.concatenate([
        img[5:height-5, 5], img[5, 5:width-5],
        img[5:height-5, width-5], img[height-5, 5:width-5]
    ])
    background_colors = background_colors.astype(np.float32)
    # Assume that the background color is only one.
    K = 2
    iter_flg = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER
    _, labels, centers = cv2.kmeans(
        background_colors, K, None, (iter_flg, 10, 1.0), 10,
        cv2.KMEANS_RANDOM_CENTERS)
    cnt1 = len(labels[labels==0])
    cnt2 = len(labels[labels==1])
    return centers[0] if cnt1 > cnt2 else centers[1]

def scale(img):
    bg = detect_background_color(img)
    return np.fix(np.sqrt(np.sum(np.square(img - bg), axis=2)) / 1.732).astype(np.uint8)

def binarize(img):
    th, bit = cv2.threshold(img, 40, 255, cv2.THRESH_BINARY)
    return bit

binarized = [binarize(scale(blur(img))) for img in read_images()]
show_images(binarized, 4, None)
Looks like you need to apply morphology: try cv2.erode and then cv2.dilate operations.
The first will remove regions smaller than the erode kernel size; the second will restore the initial size of the large blobs. You need to apply the same kernel size for both operations.
morphology.
Check also this: medium article
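A minimal sketch of that suggestion (the 5x5 kernel size is an assumption; tune it to the size of your noise), applied to one of the binarized images from above:

import cv2
import numpy as np

kernel = np.ones((5, 5), np.uint8)                        # same kernel for erode and dilate
eroded = cv2.erode(binarized[0], kernel, iterations=1)    # removes blobs smaller than the kernel
cleaned = cv2.dilate(eroded, kernel, iterations=1)        # restores the size of the surviving blobs
# equivalently in one call: cv2.morphologyEx(binarized[0], cv2.MORPH_OPEN, kernel)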

Richardson-Lucy for bitmap image

I am new to Python and am trying to modify an existing Richardson-Lucy program for an image that I have.
Specifically, I have a bitmap image 'flower2.bmp' that I am using to test the following program:
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter
from scipy.signal import convolve2d as conv2
from skimage import color, data, restoration

Image.open('flower2.bmp').convert('L').save('flower2_gray.bmp')
astro = Image.open('flower2_gray.bmp')
psf = np.ones((5, 5)) / 25
astro = conv2(astro, psf, 'same')

# Add Noise to Image
astro_noisy = astro.copy()
astro_noisy += (np.random.poisson(lam=25, size=astro.shape) - 10) / 255.

# Restore Image using Richardson-Lucy algorithm
deconvolved_RL = restoration.richardson_lucy(astro_noisy, psf, iterations=100)

fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(8, 5))
plt.gray()
for a in (ax[0], ax[1], ax[2]):
    a.axis('off')
ax[0].imshow(astro)
ax[0].set_title('Original Data')
ax[1].imshow(astro_noisy)
ax[1].set_title('Noisy data')
ax[2].imshow(deconvolved_RL, vmin=astro_noisy.min(), vmax=astro_noisy.max())
ax[2].set_title('Restoration using\nRichardson-Lucy')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
                    top=0.9, bottom=0.05, left=0, right=1)
plt.show()
Below is the image output. I would appreciate help understanding why the Restoration image is black.
Thank you.
The restoration.richardson_lucy documentation notes that the function has an optional "clip" argument, which is true by default:
clip : boolean, optional
True by default. If true, pixel value of the result above 1 or under -1 are thresholded for skimage pipeline compatibility.
However, the astro image read from Image.open has nominal range [0, 255]. I'm guessing that all pixels in astro are >= 1, so that the result after clipping to [-1, 1] is simply a constant image of ones. This could explain the plot.
The fix is to divide astro_noisy by 255 before passing it to restoration.richardson_lucy.
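A minimal sketch of that fix: rescale the noisy image into [0, 1] before deconvolution, so the later vmin/vmax taken from astro_noisy also match the deconvolved range.

astro_noisy = astro_noisy / 255.0   # scale into [0, 1] so clip=True does not flatten the result
deconvolved_RL = restoration.richardson_lucy(astro_noisy, psf, iterations=100)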

Google Colab Upload error, how can you fix it? (Cannot read property '_uploadFiles' of undefined)

My problem is that I always get the following error when I run the code below. The strange thing is that when I set the training epochs to 0, the error doesn't show up and I can upload with no problems. Thanks for the help!
I have already tried enabling third-party cookies, which did not help.
Sometimes the error is google.colab._files is undefined.
I have already tried using both Chrome and Firefox.
import tensorflow as tf
import numpy as np

mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images = training_images.reshape(60000, 28, 28, 1)
training_images = training_images / 255.0
test_images = test_images.reshape(10000, 28, 28, 1)
test_images = test_images / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=1)

classes = model.predict(test_images)
predicted_classes = np.argmax(classes, axis=1)
print(classes[0])
print(test_labels[0])

mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()

import matplotlib.pyplot as plt
plt.imshow(test_images[0], cmap='Greys_r')

import numpy as np
from google.colab import files
from keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt

uploaded = files.upload()
for fn in uploaded.keys():
    path = '/content/' + fn
    img = cv2.imread(path)
    img = cv2.resize(img, (28, 28))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    x = image.img_to_array(img, dtype=np.float32)
    print("top left pixel value:", x[0, 0])
    if x[0, 0] > 250:
        # white background
        print("needs to be inverted!")
        x -= 255
        x *= -1
    x = x / 255.0
    x = x.reshape(1, 28, 28, 1)
    plt.imshow(img, cmap='Greys_r')
    plt.show()
    classes = model.predict(x)
    plt.bar(range(10), classes[0])
    plt.show()
    print("prediction: class", np.argmax(classes[0]))
TypeError: Cannot read property '_uploadFiles' of undefined
So I found out that it works if you use two cells: one for the neural network and one for the upload feature.
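A sketch of that split, assuming the same code as above, just divided across two Colab cells:

# Cell 1: build and train the model
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images, training_labels, epochs=1)

# Cell 2: run the upload and prediction loop only after training has finished
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
    ...  # same preprocessing and model.predict(x) as above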
