How to apply the Y16 codec to the YUY2 data format? - opencv

I am getting YUY2 from my camera interface board and I need to process it to get RAW16. For that I am developing an application in opencv-python. After applying the Y16 codec it is still giving me uint8 data when I check with 'frame.dtype'. Shouldn't it be uint16?
import cv2
import numpy as np

# open video0 with the Media Foundation backend
cap = cv2.VideoCapture(0, cv2.CAP_MSMF)
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('Y', '1', '6', ' '))
cap.set(cv2.CAP_PROP_CONVERT_RGB, 0)
cap.set(cv2.CAP_PROP_FORMAT, -1)

if cap.isOpened():
    # Capture frame-by-frame
    ret, frame = cap.read()
else:
    ret = False

while ret:
    print('frame.shape = {} frame.dtype = {}'.format(frame.shape, frame.dtype))
    ret, frame = cap.read()

cap.release()
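With CAP_PROP_CONVERT_RGB set to 0, the backend hands back the raw capture buffer as bytes, so a uint8 dtype is expected; the 16-bit samples are still there, two bytes each. A minimal sketch for recovering them, assuming little-endian Y16 data and that height and width (hypothetical names) hold the negotiated frame size:

# reinterpret the raw byte buffer as 16-bit samples;
# height/width are assumed to match the camera's actual resolution
raw16 = np.frombuffer(frame.tobytes(), dtype=np.uint16).reshape(height, width)
print(raw16.dtype)  # uint16

If the array returned by cap.read() is already 2-D with an even byte count per row, frame.view(np.uint16) gives the same result without copying.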

Related

PyQt5 add text on webcam image

I would be glad if you could help.
Subject: PyQt5
In PyQt5, I can open the webcam image using OpenCV.
What I want to do is add text at a point of my choosing on the camera image. I couldn't manage it in any way. I have left the code I wrote below.
Note: I don't want to add the text with the OpenCV or PIL libraries. OpenCV has no Turkish characters, and PIL works very slowly.
Thanks in advance.
from PyQt5 import QtGui
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout, QHBoxLayout
from PyQt5.QtGui import QPixmap
import sys
import cv2
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import numpy as np

class VideoThread(QThread):
    change_pixmap_signal = pyqtSignal(np.ndarray)

    def __init__(self):
        super().__init__()
        self._run_flag = True

    def run(self):
        # capture from analog camera
        cap = cv2.VideoCapture(0)
        cap.set(3, 720)   # frame width
        cap.set(4, 576)   # frame height
        while self._run_flag:
            ret, cv_img = cap.read()
            if ret:
                cv_img = cv2.resize(cv_img, (1024, 768))
                self.change_pixmap_signal.emit(cv_img)
        # shut down capture system
        cap.release()

    def stop(self):
        # stop capture
        self._run_flag = False
        self.wait()

class App(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("KONSOL")
        self.display_width = 1024
        self.display_height = 768
        # create the label that holds the image
        self.image_label = QLabel(self)
        self.image_label.resize(self.display_width, self.display_height)
        # create a text label
        self.textLabel = QLabel('XXXX')
        # create a vertical box layout and add the two labels
        vbox = QVBoxLayout()
        vbox.addWidget(self.image_label)
        vbox.addWidget(self.textLabel)
        # set the vbox layout as the widget's layout
        self.setLayout(vbox)
        # create the video capture thread
        self.thread = VideoThread()
        # connect its signal to the update_image slot
        self.thread.change_pixmap_signal.connect(self.update_image)
        # start the thread
        self.thread.start()

    def closeEvent(self, event):
        self.thread.stop()
        event.accept()

    @pyqtSlot(np.ndarray)
    def update_image(self, cv_img):
        """Updates the image_label with a new opencv image"""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)

    def convert_cv_qt(self, cv_img):
        """Convert from an opencv image to QPixmap"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)

if __name__ == "__main__":
    app = QApplication(sys.argv)
    a = App()
    a.show()
    sys.exit(app.exec_())
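Since Qt itself renders Unicode text (Turkish characters included), one way to avoid both OpenCV and PIL is to paint the text onto the QPixmap with QPainter at the end of convert_cv_qt. A sketch of that idea; the font, position, and sample string are assumptions:

def convert_cv_qt(self, cv_img):
    """Convert from an opencv image to QPixmap and paint text onto it"""
    rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
    h, w, ch = rgb_image.shape
    qt_image = QtGui.QImage(rgb_image.data, w, h, ch * w, QtGui.QImage.Format_RGB888)
    p = qt_image.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)
    pixmap = QPixmap.fromImage(p)
    painter = QtGui.QPainter(pixmap)  # QPainter draws with full Unicode support
    painter.setPen(QtGui.QPen(Qt.red))
    painter.setFont(QtGui.QFont('Arial', 24))
    painter.drawText(50, 50, 'Örnek yazı ğüşiöç')  # assumed sample text and position
    painter.end()
    return pixmap

Because update_image runs as a slot on the GUI thread, the painting stays off the capture thread, so the video loop is unaffected.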

Read frames from UVC camera

My camera supports the 'GREY' and 'Y16' formats.
The output of v4l2-ctl --list-formats -d 0 (0 is video0) is:
ioctl: VIDIOC_ENUM_FMT
Type: Video Capture
[0]: 'GREY' (8-bit Greyscale)
[1]: 'Y16 ' (16-bit Greyscale)
Now if I use the basic video streaming code:
import numpy as np
import cv2 as cv

cap = cv.VideoCapture('/dev/video0')
if not cap.isOpened():
    print("Cannot open camera")
    exit()
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    cv.imshow('frame', frame)
    if cv.waitKey(1) == ord('q'):
        break
# When everything is done, release the capture
cap.release()
cv.destroyAllWindows()
it returns a black image.
cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter.fourcc('G', 'R', 'E', 'Y'))
By setting this FOURCC property on the camera, I am now able to get the frame.
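If you need the 16-bit 'Y16 ' format instead, a similar sketch should work, assuming the V4L2 backend hands over the raw buffer once CAP_PROP_CONVERT_RGB is disabled, as in the MSMF example at the top of this page:

cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter.fourcc('Y', '1', '6', ' '))
cap.set(cv.CAP_PROP_CONVERT_RGB, 0)  # request the raw buffer instead of BGR
ret, frame = cap.read()
if ret:
    # assumption: little-endian 16-bit samples in the returned byte buffer
    y16 = frame.view(np.uint16)

Keep in mind that imshow divides 16-bit images by 256 for display, so correctly captured Y16 frames with small pixel values can still look black on screen.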

Deploy pytorch model on webcam

I am trying to deploy a PyTorch classifier on a webcam, but I always get errors, mostly "AttributeError: 'collections.OrderedDict' object has no attribute 'load_state_dict'". The classifier is a binary classifier, and I saved the model as a .pt file.
I hope for your support in resolving the issue.
Here is the code I am using:
import numpy as np
import torch
import torch.nn
import torchvision
from torch.autograd import Variable
from torchvision import transforms
import PIL
import cv2

# This is the label map
Labels = { 0: 'Perfect',
           1: 'Defected' }

# Let's preprocess the input frame
data_transforms = torchvision.transforms.Compose([
    torchvision.transforms.Resize(size=(224, 224)),
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # assigning the device which will do the calculation
model = torch.load("defect_classifier.pt")  # load model to CPU
model.load_state_dict(torch.load("defect_classifier.pt"))
model = model.to(device)  # set where to run the model and matrix calculation
model.eval()  # set the model to eval() mode for testing

# Set the webcam
def Webcam_720p():
    cap.set(3, 1280)
    cap.set(4, 720)

def argmax(prediction):
    prediction = prediction.cpu()
    prediction = prediction.detach().numpy()
    top_1 = np.argmax(prediction, axis=1)
    score = np.amax(prediction)
    score = '{:6f}'.format(score)
    prediction = top_1[0]
    result = Labels[prediction]
    return result, score

def preprocess(image):
    image = PIL.Image.fromarray(image)  # webcam frames are numpy arrays,
                                        # so transform back to a PIL image
    print(image)
    image = data_transforms(image)
    image = image.float()
    # image = Variable(image, requires_autograd=True)
    image = image.cuda()
    image = image.unsqueeze(0)  # ResNet-50 seems to only accept 4-D tensors,
                                # so we add another dimension to our 3-D tensor
    return image

# Let's start the real-time classification process!
cap = cv2.VideoCapture(0)  # set the webcam
Webcam_720p()
fps = 0
show_score = 0
show_res = 'Nothing'
sequence = 0

while True:
    ret, frame = cap.read()  # capture each frame
    if fps == 4:
        image = frame[100:450, 150:570]
        image_data = preprocess(image)
        print(image_data)
        prediction = model(image_data)
        result, score = argmax(prediction)
        fps = 0
        if result >= 0.5:
            show_res = result
            show_score = score
        else:
            show_res = "Nothing"
            show_score = score
    fps += 1
    cv2.putText(frame, '%s' % (show_res), (950, 250), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 3)
    cv2.putText(frame, '(score = %.5f)' % (show_score), (950, 300), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    cv2.rectangle(frame, (400, 150), (900, 550), (250, 0, 0), 2)
    cv2.imshow("ASL SIGN DETECTER", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyWindow("ASL SIGN DETECTER")
Don't use this line:
model = torch.load("defect_classifier.pt")
Here torch.load returns the saved state_dict (an OrderedDict), not a model object, which is why you get the AttributeError. Instead use model = Your_model_class(), since load_state_dict is a method of your model class, and then load the saved weights into that instance.
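Spelled out, the pattern the answer describes might look like this; Your_model_class is a stand-in for whatever class the classifier was defined with, which the question doesn't show:

# a sketch, assuming defect_classifier.pt holds a state_dict and
# Your_model_class (hypothetical name) matches the saved architecture
model = Your_model_class()
state_dict = torch.load("defect_classifier.pt", map_location=device)
model.load_state_dict(state_dict)
model = model.to(device)
model.eval()

If the file had instead been saved with torch.save(model) (the whole object), torch.load alone would have returned a usable model; the OrderedDict in the error message shows it contains only the weights.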

Python realtime camera(video) image process

When the realtime camera detects some text, the frame is processed through a function (img_process in my code), but it takes so long (a long lag) that the camera seems to freeze.
I'd like the camera to keep working continuously (keep capturing and showing images).
The code is below:
from PIL import Image  # pip install pillow
from pytesseract import *  # pip install pytesseract
import pytesseract
import configparser
import os
import cv2
import numpy as np
import pandas as pd
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
from concurrent.futures import ThreadPoolExecutor

config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + os.sep + 'envs' + os.sep + 'property.ini')

if __name__ == "__main__":
    data = pd.read_csv('./test6.txt', sep='\t', engine='python')
    print("Part N/O Scan...")
    vs = VideoStream(src=1).start()
    time.sleep(2.0)
    fps = FPS().start()
    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=500)
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                     0.007843, (300, 300), 127.5)
        # problem (delay) happens in this line
        img_process(frame)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        fps.update()
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
    vs.stop()
In that case, what I usually do is skip a few frames from the input. Add something like this in the while loop:
while True:
    for i in range(5):  # you can adjust this number
        frame = vs.read()
    frame = imutils.resize(frame, width=500)
This works pretty well if you want the video output in realtime.
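Since the question already imports ThreadPoolExecutor, another option is to run img_process in a background worker so the display loop never blocks; the preview stays live and only the OCR result lags. A sketch, assuming img_process accepts a single frame:

executor = ThreadPoolExecutor(max_workers=1)
pending = None
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    # submit a new OCR job only when the previous one has finished
    if pending is None or pending.done():
        pending = executor.submit(img_process, frame.copy())
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break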

Getting a (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'

I am getting a (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor' error while doing face detection with OpenCV. Here is my code:
import cv2
import numpy as np

# Load HAAR face classifier
face_classifier = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')

# Load functions
def face_extractor(img):
    # Function detects faces and returns the cropped face
    # If no face is detected, it returns None
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if faces is ():
        return None
    # Crop all faces found
    for (x, y, w, h) in faces:
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face

# Initialize webcam
cap = cv2.VideoCapture(0)
count = 0

# Collect 100 samples of your face from webcam input
while True:
    ret, frame = cap.read()
    if face_extractor(frame) is not None:
        count += 1
        face = cv2.resize(face_extractor(frame), (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        # Save file in specified directory with unique name
        file_name_path = './faces/user/' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)
        # Put count on images and display live count
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Face Cropper', face)
    else:
        print("Face not found")
        pass
    if cv2.waitKey(1) == 13 or count == 100:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()
print("Collecting Samples Complete")
And here is the output, which is an error:
error: OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
I tried converting / to \, but that resulted in another error: SyntaxError: EOL while scanning string literal.
Please help me solve this issue. I have also tr
You need to specify the full path to your haarcascade_frontalface_default.xml file.
Use double backslashes (\\) in your Windows path as I did.
I modified your code; I hope it helps:
# importing libraries
import cv2
import numpy as np

# Load HAAR face classifier
face_classifier = cv2.CascadeClassifier('D:\\OpenCV\\opencv\\build\\etc\\Haarcascades\\haarcascade_frontalface_default.xml')

# crop the face region and convert it to B&W
def face_extractor(img):
    # Function detects faces and returns the cropped face
    # If no face is detected, it returns None
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if faces is ():
        return None
    # Crop all faces found
    for (x, y, w, h) in faces:
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face

# Ask for the total number of users
num = int(input("Enter no of users you want to add "))
user = 1

# Initialize webcam
cap = cv2.VideoCapture(0)

# count the total number of faces
count = 0
print("Start capturing the input data set")
d = input('Enter face name')

# Collect 100 samples of each face from webcam input
while True:
    ret, frame = cap.read()
    print(type(frame))
    # condition to check whether a face is in the frame or not
    if face_extractor(frame) is not None:
        count += 1
        face = cv2.resize(face_extractor(frame), (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        # Save file in specified directory with unique name
        file_name_path = './faces/user/' + str(d) + '-' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)
        # Put count on images and display live count
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Face Cropper', face)
    else:
        print("Face not found")
        pass
    # check whether each user's 100 images have been captured
    if (count % 100) == 0 and count != num * 100 and count != 0:
        print("Place the new user signature")
        cv2.waitKey()
    # check whether all images have been captured
    if cv2.waitKey(1) == 13 or count == num * 100:  # 13 is the Enter key
        break

# close all windows
cap.release()
cv2.destroyAllWindows()
print("Collecting Samples Complete")
