from PyQt5 import QtGui, QtWidgets, uic
from PyQt5.QtGui import QPixmap
import sys
import cv2  # opencv-python==4.5.3
import time
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread, QProcess
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QListWidget,
                             QLabel, QPushButton, QPlainTextEdit, QVBoxLayout)
import numpy as np
import sqlite3
import torch
def dict_factory(cursor, row):
    d = {}
    for idx, col in enumerate(cursor.description):
        d[col[0]] = row[idx]
    return d
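# dict_factory is presumably meant to be installed as the sqlite3 connection's
# row_factory so that queries return dicts -- a minimal usage sketch, with a
# hypothetical database file name:
#   conn = sqlite3.connect('detections.db')
#   conn.row_factory = dict_factory
#   conn.execute('SELECT 1 AS one').fetchone()  # -> {'one': 1}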
stop_threads = False
class VideoThread(QThread):
    CONFIDENCE_THRESHOLD = 0.2
    NMS_THRESHOLD = 0.4
    COLORS = [(0, 255, 255), (255, 255, 0), (0, 255, 0), (255, 0, 0)]
    frame_counter = 0

    class_names = []
    with open("backup/coco.names", 'r') as f:
        class_names = [cname.strip() for cname in f.readlines()]
    #net = torch.hub.load('C:/Users/Niko/Desktop/Versuch/backup/custom_yolov5s.yaml', 'custom', 'C:/Users/Niko/Desktop/Versuch/backup/best_Neu.pt', force_reload=True)
    net = cv2.dnn.readNetFromONNX(r'C:/Users/Niko/Desktop/Versuch/backup/best.onnx')  # previously r'C:/Users/Niko/Desktop/Versuch/backup/yolov5s.onnx'
    #net = cv2.dnn.readNet("C:/Users/Niko/Desktop/Versuch/backup/yolov4-tiny-custom_NEU.cfg", "backup/yolo.weights")  # the YOLOv4 setup that worked
    try:
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
        print("Using GPU")
    except cv2.error:
        # fall back to the default CPU backend
        print("Using CPU")
    model = cv2.dnn_DetectionModel(net)
    model.setInputParams(size=(640, 640), scale=1/255, swapRB=True)  # was (416, 416) for YOLOv4
    change_pixmap_signal = pyqtSignal(np.ndarray)
    screenshot_fire_signal = pyqtSignal(np.ndarray)   # was ekran_goruntusu_ates_signal
    screenshot_smoke_signal = pyqtSignal(np.ndarray)  # was ekran_goruntusu_duman_signal
    konsolen_signal = pyqtSignal(str)                 # was konsol_signal
    objektname_signal = pyqtSignal(str, int)          # was nesne_ismi_signal
    def run(self):
        global stop_threads
        vc = cv2.VideoCapture("drohnenvideo_feuer1080.mp4")
        while cv2.waitKey(1) < 1:
            (grabbed, frame) = vc.read()
            if grabbed:
                frame = self.window_in(frame)
                self.change_pixmap_signal.emit(frame)
            if stop_threads:
                break
        vc.release()
I want to build a custom object detector, so I trained a YOLOv5 model on a custom dataset, received the best weights, and converted them to .onnx. This code works for YOLOv4, but with the YOLOv5 model converted to .onnx it still gives me an error:
cv2.error: OpenCV(4.5.3) C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-sn_xpupm\opencv\modules\dnn\src\onnx\onnx_importer.cpp:2146: error: (-2:Unspecified error) in function 'cv::dnn::dnn4_v20210608::ONNXImporter::handleNode'
Node [Identity]:(onnx::Reshape_582) parse error: OpenCV(4.5.3) C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-sn_xpupm\opencv\modules\dnn\src\dnn.cpp:5298: error: (-215:Assertion failed) inputs.size() in function 'cv::dnn::dnn4_v20210608::Layer::getMemoryShapes'
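For context: this ONNXImporter error in OpenCV 4.5.3 is commonly reported for YOLOv5 ONNX exports whose graphs contain nodes (such as Identity) that this importer version cannot parse; upgrading opencv-python is one frequently suggested remedy. Another is re-exporting the weights with a lower ONNX opset, sketched here under the assumption that the ultralytics/yolov5 hub entry point and the original best.pt are available:

# Hypothetical re-export sketch; the YOLOv5 repo's own exporter is the usual
# route (flags are assumptions, not verified here):
#   python export.py --weights backup/best.pt --include onnx --opset 12
# A direct equivalent, as a sketch only:
import torch
model = torch.hub.load('ultralytics/yolov5', 'custom', path='backup/best.pt')
dummy = torch.zeros(1, 3, 640, 640)  # must match setInputParams(size=(640, 640))
# model.model unwraps the hub AutoShape wrapper (an assumption about its structure)
torch.onnx.export(model.model, dummy, 'backup/best_opset12.onnx', opset_version=12,
                  input_names=['images'], output_names=['output'])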
When I use the code below to perform semantic segmentation on my own dataset (40 images) with annotations (1 class, 'myface', in COCO JSON format), I get just a black image: no mask, all bits the same. The model accuracy is 67.87% in every epoch, even though the loss goes down each epoch:
import cv2
import os
import json
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.models import Model
from pycocotools.coco import COCO
from tensorflow.keras.applications import VGG16
from sklearn.utils import compute_sample_weight
folder_path = "D:\\ImageClassification\\face_semantic_segmentation\\dataset"
filenames = os.listdir(folder_path)
images = []
for filename in filenames:
    img = cv2.imread(os.path.join(folder_path, filename))
    img = cv2.resize(img, (256, 256))
    img = np.array(img)
    images.append(img)
x_train = np.array(images)
with open("D:\\ImageClassification\\face_semantic_segmentation\\annotations.json") as f:
coco = json.load(f)
annotations = coco['annotations']
masks = {}
for annotation in annotations:
    image_id = annotation['image_id']
    if image_id not in masks:
        masks[image_id] = []
    masks[image_id].append(annotation['segmentation'])
resized_masks = []
for image_id, mask in masks.items():
    mask_img = np.zeros((720, 1280), dtype=np.uint8)
    for segmentation in mask:
        poly = np.array(segmentation).reshape((-1, 1, 2)).astype(np.int32)
        cv2.fillPoly(mask_img, [poly], 1)
    mask_img = cv2.resize(mask_img, (256, 256))
    mask_img = np.stack([mask_img] * 3, axis=-1)
    resized_masks.append(mask_img)
y_train = np.array(resized_masks)
y_train.shape
import matplotlib.pyplot as plt
import numpy as np
mask = y_train[4]
mask = np.sum(mask, axis=-1)
plt.imshow(mask)
plt.show()
from segmentation_models import Unet
from segmentation_models import get_preprocessing
from segmentation_models.losses import bce_jaccard_loss
from segmentation_models.metrics import iou_score
from tensorflow.keras.models import Model
BACKBONE = 'resnet50'
preprocess_input = get_preprocessing(BACKBONE)
model = Unet(BACKBONE,classes=2,input_shape=(256,256, 3), encoder_weights='imagenet',activation='sigmoid')
x_train = preprocess_input(x_train)
x = model.layers[-1].output
x = Conv2D(3, (1, 1), activation='sigmoid')(x)
model = Model(inputs=model.input, outputs=x)
model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['binary_accuracy'])
model.fit(x=x_train,y=y_train,batch_size=32,epochs=50)
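One note that may help narrow this down: a binary_accuracy pinned at 67.87% while the loss keeps falling usually means the network is predicting one constant class whose pixel share equals that number. A minimal diagnostic sketch, assuming x_train and y_train from above are index-aligned:

# Hypothetical sanity check: are the masks non-empty, and does the
# background fraction match the stuck accuracy value?
print("unique mask values:", np.unique(y_train))   # expect [0 1]
print("background fraction:", 1 - y_train.mean())  # ~0.6787 would explain it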
I'm trying to get real-time face recognition working with a trained VGG16 model (it has 6 classes). When I tried, I got the above error. This is my code:
from PIL import Image
from tensorflow.keras.applications.vgg16 import preprocess_input
import base64
from io import BytesIO
import json
import random
import cv2
from keras.models import load_model
import numpy as np
from keras_preprocessing import image
model = load_model('FAceRec.h5', compile=False)
face_cas = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def face_extr(img):
    faces = face_cas.detectMultiScale(img, 1.3, 5)
    if len(faces) == 0:
        return None
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 255), 2)
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face
## web cam
video_cap = cv2.VideoCapture(0)
while True:
    _, frame = video_cap.read()
    face = face_extr(frame)
    if isinstance(face, np.ndarray):
        face = cv2.resize(face, (244, 244), 3)
        face = face.reshape(1, 224, 224, 3)
        img = Image.fromarray(face, 'RGB')
        img_array = np.array(img)
        img_array = np.expand_dims(img_array, axis=0)
        pred = model.predict(img_array)
        print(pred)
        name = "No Matching"
        if pred[1][1] > 0.5:
            name = "Suhail"
            cv2.putText(frame, name, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        else:
            cv2.putText(frame, "No Matching Face", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow('Result', frame)
    k = cv2.waitKey(1)
    if k == ord('q'):
        break
video_cap.release()
cv2.destroyAllWindows()
Can someone please help me figure this out? Even when I change the reshape to (244, 244, 3) it shows the same error again and again. Can somebody explain why it's happening and how to solve it?
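One observation on the shapes involved: cv2.resize(face, (244, 244)) produces 244 x 244 x 3 = 178,608 values, which can never be reshaped to (1, 224, 224, 3) = 150,528 values, so that reshape fails however it is written. A minimal sketch of a consistent preprocessing step, assuming the model really expects 224x224 RGB input:

# Sketch: keep resize and reshape consistent at 224x224 (VGG16's usual input size).
face = cv2.resize(face, (224, 224))                     # (224, 224, 3), BGR uint8
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)            # assuming training used RGB
img_array = np.expand_dims(face.astype(np.float32), 0)  # (1, 224, 224, 3)
pred = model.predict(img_array)                         # shape (1, 6); class scores are in pred[0]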
My problem is that I always get the following error when I run the code below. The strange thing is that the upload works if I set the training epochs to 0; as soon as the model actually trains, the error shows up. Sometimes the error is google.colab._files is undefined.
I have already tried enabling third-party cookies and using both Chrome and Firefox, which did not help. Thanks for the help!
import tensorflow as tf
import numpy as np
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images = training_images.reshape(60000, 28, 28, 1)
training_images = training_images / 255.0
test_images = test_images.reshape(10000, 28, 28, 1)
test_images = test_images / 255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images,training_labels, epochs=1)
classes = model.predict(test_images)
predicted_classes = np.argmax(classes, axis=1)
print(classes[0])
print(test_labels[0])
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
import matplotlib.pyplot as plt
plt.imshow(test_images[0], cmap='Greys_r')
import numpy as np
from google.colab import files
from keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
uploaded = files.upload()
for fn in uploaded.keys():
    path = '/content/' + fn
    img = cv2.imread(path)
    img = cv2.resize(img, (28, 28))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    x = image.img_to_array(img, dtype=np.float32)
    print("top left pixel value:", x[0, 0])
    if x[0, 0] > 250:
        # white background: invert so it matches the training data
        print("needs to be inverted!")
        x -= 255
        x *= -1
    x = x / 255.0
    x = x.reshape(1, 28, 28, 1)
    plt.imshow(img, cmap='Greys_r')
    plt.show()
    classes = model.predict(x)
    plt.bar(range(10), classes[0])
    plt.show()
    print("prediction: class", np.argmax(classes[0]))
TypeError: Cannot read property '_uploadFiles' of undefined
So I found out that it works if you use two cells: one for the neural network and one for the upload feature.
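A minimal sketch of that split (an assumption about the intended layout, with the model code above otherwise unchanged): everything up to and including model.fit() stays in the first cell, and only the upload plus prediction loop goes into a second cell run after training finishes.

# Cell 1 (hypothetical split): model definition and training only, ending with:
model.fit(training_images, training_labels, epochs=1)

# Cell 2: run after cell 1 has completed, so files.upload() executes
# in its own cell context.
from google.colab import files
uploaded = files.upload()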
I want to make a program that checks my foot size. I don't understand this error from OpenCV. The code follows this tutorial: http://cocoding94.blogspot.com/2017/05/blog-post_7.html.
import numpy as np
import matplotlib.pyplot as plt
import cv2
img = cv2.imread("foot.jpeg")
blur = cv2.blur(img,(5,10))
rows,cols,ch = img.shape
pts1 = np.float32([170,270],[480,220],[240, 710],[540,650])
pts2 = np.float32([0,0],[210,0],[0,297],[210,297])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(img,M,(210,297))
plt.subplot(121),plt.imshow(img),plt.title('Input')
plt.plot(*zip(*point), marker='.', color='r', ls='')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
plt.show()
On the Raspberry Pi, the error printed is:
Traceback (most recent call last):
  File "foot.py", line 7, in <module>
    blur = cv2.blur(img,(5,10))
cv2.error: OpenCV(3.4.3) /home/pi/opencv/opencv-3.4.3/modules/core/src/matrix.cpp:756: error: (-215:Assertion failed) dims <= 2 && step[0] > 0 in function 'locateROI'
But please, next time add more information so the community can help you better. I fixed some errors in your code: pts1 and pts2 must each be one array of four points (note the extra set of brackets), and the point list used for plotting has to be defined. I don't have your original image, but the code now works well; try changing your points to get a good perspective:
import numpy as np
import matplotlib.pyplot as plt
import cv2
img = cv2.imread("machupichu.jpg")
#blur = cv2.blur(img,(5,5))
#rows,cols,ch = img.shape
point=[[170,270],[480,220],[240, 710],[540,650]]
pts1 = np.float32([[170,270],[480,220],[240, 710],[540,650]])
pts2 = np.float32([[0,0],[210,0],[0,297],[210,297]])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(img,M,(210,297))
plt.subplot(121)
plt.imshow(img)
plt.title('Input')
plt.plot(*zip(*point), marker='.', color='r', ls='')
plt.subplot(122)
plt.imshow(dst)
plt.title('Output')
plt.show()
Here is the image which I used:
Machu Pichu
This is the result:
Best Regards.
I have written code to build a dictionary from multiple images. Here is my code:
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.fixes import sp_version
from sklearn.datasets import load_sample_image
from scipy import ndimage
from skimage import color
from skimage import io
from PIL import Image
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import sparse_encode
from scipy.misc import imfilter, imread
from scipy.signal import convolve2d as conv2
from skimage import data, img_as_float
from scipy import ndimage as ndi
from skimage import feature
from scipy.misc import imsave
c = np.asarray(Image.open('047.jpg').convert('L').resize((512,512), Image.ANTIALIAS))
d = np.asarray(Image.open('048.jpg').convert('L').resize((512,512), Image.ANTIALIAS))
e = np.asarray(Image.open('049.jpg').convert('L').resize((512,512), Image.ANTIALIAS))
f = np.asarray(Image.open('046.jpg').convert('L').resize((512,512), Image.ANTIALIAS))
g = np.asarray(Image.open('038.jpg').convert('L').resize((512,512), Image.ANTIALIAS))
h = np.asarray(Image.open('039.jpg').convert('L').resize((512,512), Image.ANTIALIAS))
n0 = np.asarray(Image.open('037.jpg').convert('L').resize((512,512), Image.ANTIALIAS))
n0 = n0 / 255
height, width = n0.shape
n0 = n0 + 0.075 * np.random.randn(height, width)
imsave('noise.png',n0)
patchsize = (8,8)
t0 = time()
data1 = extract_patches_2d(c,(8,8))
data2 = extract_patches_2d(d,(8,8))
data3 = extract_patches_2d(e,(8,8))
data4 = extract_patches_2d(f,(8,8))
data5 = extract_patches_2d(g,(8,8))
data6 = extract_patches_2d(h,(8,8))
data = np.append(data1,data2,axis=0)
data = np.append(data,data3,axis=0)
data = np.append(data,data4,axis=0)
data = np.append(data,data5,axis=0)
data = np.append(data,data6,axis=0)
data = data.reshape(data.shape[0], -1)
print('Extract patch shape :',data.shape)
data = data - np.mean(data, axis=0)
data = data / np.std(data, axis=0)
t1 = time()
print('Total time : ',round((t1-t0),2),' sec')
print('Learning the dictionary ....')
t2 = time()
n_iter = 1000
dico = MiniBatchDictionaryLearning(n_components=100,alpha=3,n_iter=n_iter)
V = dico.fit(data).components_
Actually, I want to train a well-learned dictionary that will help me denoise an image.
Is it a correct approach to train the dictionary using multiple images like this?
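Pooling 8x8 patches from several images into one training set is a standard way to learn a generic dictionary. To actually denoise n0 with it, here is a minimal sketch in the spirit of the scikit-learn image-denoising example (it reuses dico, V, patchsize, height and width from above; note that the training patches were also divided by their std, which a faithful reconstruction would need to account for):

# Sketch: sparse-code patches of the noisy image against the learned dictionary.
noisy_patches = extract_patches_2d(n0, patchsize)
X = noisy_patches.reshape(noisy_patches.shape[0], -1)
intercept = np.mean(X, axis=0)
X = X - intercept
dico.set_params(transform_algorithm='omp', transform_n_nonzero_coefs=2)
code = dico.transform(X)
patches = (np.dot(code, V) + intercept).reshape(len(X), *patchsize)
denoised = reconstruct_from_patches_2d(patches, (height, width))
imsave('denoised.png', denoised)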