When the realtime camera detects some text, the frame is processed by a function (img_process in my code),
but the processing takes so long (a long lag) that the camera appears to freeze.
I'd like the camera to keep working continuously (keep capturing and showing frames).
The code is below:
from PIL import Image #pip install pillow
from pytesseract import * #pip install pytesseract
import pytesseract
import configparser
import os
import cv2
import numpy as np
import pandas as pd
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
from concurrent.futures import ThreadPoolExecutor
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + os.sep + 'envs' + os.sep + 'property.ini')
if __name__ == "__main__":
    data = pd.read_csv('./test6.txt', sep='\t', engine='python')
    print("Part N/O Scan...")

    vs = VideoStream(src=1).start()
    time.sleep(2.0)
    fps = FPS().start()

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=500)
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                     0.007843, (300, 300), 127.5)

        # problem (delay) happens in this line
        img_process(frame)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        fps.update()

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
    vs.stop()
In that case, what I usually do is skip a few frames of the input. Add something like this inside the while loop:
while True:
    for i in range(5):  # you can adjust this number
        frame = vs.read()
    frame = imutils.resize(frame, width=500)
This works pretty well if you want the video output in realtime.
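Another option (not part of the original answer) is to push img_process onto the ThreadPoolExecutor the question already imports, so the display loop keeps running while the OCR happens in the background. A minimal sketch, assuming img_process only needs the frame:

from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=1)
pending = None  # future for the frame currently being processed

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=500)

    # Submit a new OCR job only when the previous one has finished,
    # so the display loop never blocks on img_process.
    if pending is None or pending.done():
        if pending is not None:
            result = pending.result()  # use/print the OCR result here
        pending = executor.submit(img_process, frame.copy())

    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

The displayed frame is always the latest one from the camera; the OCR result just arrives a little later.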
import cv2 as cv
import numpy as np
from google.colab.patches import cv2_imshow
from tensorflow.keras.models import load_model
img_color = cv.imread('test3.jpg', cv.IMREAD_COLOR)
img_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)
ret,img_binary = cv.threshold(img_gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
kernel = cv.getStructuringElement( cv.MORPH_RECT, ( 5, 5 ) )
img_binary = cv.morphologyEx(img_binary, cv.MORPH_CLOSE, kernel)
cv2_imshow('digit', img_binary)
cv.waitKey(0)
This is part of my process of training a model on handwritten digits and testing the trained model.
I want to display an image with imshow() in Colab.
Is there any way to use it without setting the file path?
You can use matplotlib's imshow function for this:
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.gcf()
fig.set_size_inches(18, 10)
plt.axis("off")
plt.rcParams['figure.figsize'] = [20, 10]
plt.imshow(img_binary)
plt.show()
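As a side note (not from the original answer): Colab's cv2_imshow patch can also display the array directly; unlike cv2.imshow it takes only the image, with no window-name argument:

from google.colab.patches import cv2_imshow

cv2_imshow(img_binary)  # single argument: the image array; no window name, no waitKey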
I want to save a contour plot in Python, but I can't get it to work.
I used plt.savefig(), but the saved image is empty. Why?
!pip install tftb
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from tftb.generators import atoms
import tftb
import cv2
mat = sio.loadmat('/content/drive/MyDrive/z5_25.mat')
signal = mat['z']
z = signal.T
images_dir = '/content/drive/MyDrive/image'
for i in range(122):
    wvd = tftb.processing.WignerVilleDistribution(z[i])
    wvd.run()
    fig = plt.figure()
    plt.rcParams['figure.figsize'] = (1.5, 1.5)
    wvd.plot(kind='contour')
    plt.savefig(f"{images_dir}/fig5_25_{i}.png")
    plt.show()
I know it is late, but you can do this by replacing plt with wvd in the lines

wvd.plot(kind='contour')
plt.savefig(f"{images_dir}/fig5_25_{i}.png")

like this:

wvd.plot(kind='contour')
wvd.savefig(f"{images_dir}/fig5_25_{i}.png")

It should work.
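For what it's worth (not from the original answer, and I have not tested it against tftb): the loop already creates an explicit figure handle with fig = plt.figure(), so you could also save through that handle inside the same loop, which does not depend on whichever figure happens to be active:

    fig = plt.figure()
    plt.rcParams['figure.figsize'] = (1.5, 1.5)
    wvd.plot(kind='contour')
    fig.savefig(f"{images_dir}/fig5_25_{i}.png")  # save via the explicit handle
    plt.show()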
After I run my code I get the following warning:
"RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable."
My question is how and where I should add this line to avoid the warning/error in my code below:
from scipy import stats, optimize
import pymc3 as pm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
#from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
from theano import shared
np.random.seed(9)
#Load the Data
dataset = pd.read_csv('PV-PCM.csv')
X=dataset.iloc[:,[0,1,2,3,4]].values
y=dataset.iloc[:,5].values
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size = 0.2, random_state=42)
#Shapes
X.shape, y.shape, X_tr.shape, X_te.shape
#Preprocess data for Modeling
shA_X = shared(X_tr)
#Generate Model
linear_model = pm.Model()
with linear_model:
    # Priors for unknown model parameters
    alpha = pm.Normal("alpha", mu=y_tr.mean(), sd=10)
    betas = pm.Normal("betas", mu=0, sd=1000, shape=X.shape[1])
    sigma = pm.HalfNormal("sigma", sd=100)  # you could also try with a HalfCauchy that has longer/fatter tails
    mu = alpha + pm.math.dot(betas, X_tr.T)
    likelihood = pm.Normal("likelihood", mu=mu, sd=sigma, observed=y_tr)
    step = pm.NUTS()
    trace = pm.sample(1000, step)
    chain = trace[100:]
    #pm.traceplot(chain);

    #Traceplot
    pm.traceplot(trace)
    ppc = pm.sample_prior_predictive(samples=1000, random_seed=9)
    pm.plot_posterior(trace, figsize=(12, 10))
sns.kdeplot(y_tr, alpha=0.5, lw=4, c='b')
for i in range(100):
    sns.kdeplot(ppc['likelihood'][i], alpha=0.1, c='g')

alpha_pred = chain['alpha'].mean()
betas_pred = chain['betas'].mean(axis=0)
y_pred = alpha_pred + np.dot(betas_pred, X_tr.T)
Thank you all.
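Not an answer from the original thread, but a minimal sketch of the idiom the error message itself describes: on platforms that spawn (rather than fork) worker processes, everything that eventually calls pm.sample() should sit under the main-module guard, e.g.

import multiprocessing as mp

def main():
    # Move the data loading, the model definition, and the pm.sample() call here,
    # so they only execute in the parent process and not in each spawned worker.
    ...

if __name__ == '__main__':
    mp.freeze_support()  # only matters if the script is later frozen into an executable
    main()

Alternatively, if your pymc3 version supports it, pm.sample(1000, step, cores=1) keeps sampling in a single process, which sidesteps the spawn issue at the cost of running chains sequentially.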
I had posted a question previously about the Richardson-Lucy algorithm. I have a follow-up question I would appreciate help with.
Below is the Python code I am using. My input image is already blurry, so I removed the lines I originally had to intentionally blur the image. I am getting the error "RuntimeWarning: invalid value encountered in true_divide relative_blur = image / convolve(im_deconv, psf, mode='same')". I would appreciate help with debugging this. I kept the lines in the program that I commented out based on the earlier suggestion.
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter
from scipy.signal import convolve2d as conv2
from skimage import color, data, restoration
Image.open('TOFA-003_UV_Cured_Lincoln_Corrected.bmp').convert('L').save('TOFA-003_UV_Cured_Lincoln_Corrected_gray.bmp')
astro = Image.open('TOFA-003_UV_Cured_Lincoln_Corrected_gray.bmp')
psf = np.ones((5, 5)) / 25
#psf = np.ones((8, 8)) / 25
astro = conv2(astro, psf, 'same')
astro = astro/255
# Add Noise to Image
#astro_noisy = astro.copy()
#astro_noisy += (np.random.poisson(lam=25, size=astro.shape) - 10) / 255
#astro_noisy = astro_noisy/255
# Restore Image using Richardson-Lucy algorithm
deconvolved_RL = restoration.richardson_lucy(astro, psf, iterations=2)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(8, 5))
plt.gray()
for a in (ax[0], ax[1], ax[2]):
    a.axis('off')
ax[0].imshow(astro)
ax[0].set_title('Original Data')
#ax[1].imshow(astro_noisy)
#ax[1].set_title('Noisy data')
ax[2].imshow(deconvolved_RL, vmin=astro.min(), vmax=astro.max())
ax[2].set_title('Restoration using\nRichardson-Lucy')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
top=0.9, bottom=0.05, left=0, right=1)
plt.show()
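Not part of the original post, but that warning usually means a zero or NaN ends up in the denominator inside richardson_lucy (it divides the image by a convolution of the current estimate). A small, hedged check and workaround, assuming astro is the float array built above:

import numpy as np

# Sanity-check the input: NaNs or sizeable all-zero regions in the data lead to
# 0/0 during the Richardson-Lucy update and show up as "invalid value" warnings.
print("min/max:", astro.min(), astro.max())
print("any NaN:", np.isnan(astro).any())

# Replace NaNs and clip away exact zeros so the intermediate divisions stay finite.
astro_safe = np.clip(np.nan_to_num(astro), 1e-6, 1.0)
deconvolved_RL = restoration.richardson_lucy(astro_safe, psf, iterations=2)

Recent scikit-image releases also expose a filter_epsilon argument on richardson_lucy for exactly this situation, if your installed version has it.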
I want to make a program that checks my foot size.
I don't understand this OpenCV error.
url: http://cocoding94.blogspot.com/2017/05/blog-post_7.html .
import numpy as np
import matplotlib.pyplot as plt
import cv2
img = cv2.imread("foot.jpeg")
blur = cv2.blur(img,(5,10))
rows,cols,ch = img.shape
pts1 = np.float32([170,270],[480,220],[240, 710],[540,650])
pts2 = np.float32([0,0],[210,0],[0,297],[210,297])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(img,M,(210,297))
plt.subplot(121),plt.imshow(img),plt.title('Input')
plt.plot(*zip(*point), marker='.', color='r', ls='')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
plt.show()
On the Raspberry Pi the error printed is:
Traceback (most recent call last):
  File "foot.py", line 7, in
    blur = cv2.blur(img,(5,10))
cv2.error: OpenCV(3.4.3) /home/pi/opencv/opencv-3.4.3/modules/core/src/matrix.cpp:756: error:
(-215:Assertion failed) dims <= 2 && step[0] > 0 in function 'locateROI'
Please add more information next time to help the community. I fixed some errors in your code (mainly pts1 and pts2, which need a nested list of four points each). I don't have your original image, but the code works now; try changing your points to get a good perspective:
import numpy as np
import matplotlib.pyplot as plt
import cv2
img = cv2.imread("machupichu.jpg")
#blur = cv2.blur(img,(5,5))
#rows,cols,ch = img.shape
point=[[170,270],[480,220],[240, 710],[540,650]]
pts1 = np.float32([[170,270],[480,220],[240, 710],[540,650]])
pts2 = np.float32([[0,0],[210,0],[0,297],[210,297]])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(img,M,(210,297))
plt.subplot(121)
plt.imshow(img)
plt.title('Input')
plt.plot(*zip(*point), marker='.', color='r', ls='')
plt.subplot(122)
plt.imshow(dst)
plt.title('Output')
plt.show()
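One more note, which is an assumption on my part based on the linked tutorial: the 210x297 output size matches an A4 sheet in millimetres, so once the warp is right, distances in dst can be read off at roughly one pixel per millimetre. A tiny sketch with hypothetical row indices for heel and toe:

# dst is 297 rows x 210 columns, i.e. about 1 px per mm if the source points outline an A4 sheet
y_heel, y_toe = 290, 40            # hypothetical row indices, found by inspecting dst
foot_length_mm = abs(y_heel - y_toe)
print("approx. foot length:", foot_length_mm, "mm")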
Here is the image which I used:
Machu Pichu
This is the result:
Best Regards.