if(conf&lt;50): ; SyntaxError: invalid syntax - opencv

I am building face recognition code with Python and OpenCV. This line gives a SyntaxError pointing at the ; . When I replace the ; with > or any other operator, that error goes away, but lt becomes unknown and it shows an error like this: NameError: name 'lt' is not defined.
This is my whole code in detector.py
import cv2
import numpy as np
faceDetect= cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture(0);
rec= cv2.face.LBPHFaceRecognizer_create()
rec.read("recognizer\\trainingData.yml")
id=0
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255, 255, 255)
#font = cv2.InitFont(cv2.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1)
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceDetect.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)
        #print(rec.predict(gray[y:y+h,x:x+w]))
        id, conf = rec.predict(gray[y:y+h, x:x+w])
        if(conf&lt;50):
            if(id==1):
                id = "Osama"
            elif(id==2):
                id = "Psycho"
        else:
            id = conf
        cv2.putText(img, str(id), (x, y+h), fontFace, fontScale, fontColor)
    cv2.imshow('Face', img)
    if cv2.waitKey(1) == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()

So your problem is that you replace the ; with an operator, when you should in fact replace the whole sequence &lt; (the HTML entity which stands for lower-than) with the character <.
So if(conf&lt;50) would give you if(conf<50).
See special chars in HTML.
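With that fixed, the relevant block of detector.py would read as follows (a sketch; note that with LBPH a lower confidence value means a closer match):

id, conf = rec.predict(gray[y:y+h, x:x+w])
if conf < 50:  # a real less-than character, not the HTML entity &lt;
    if id == 1:
        id = "Osama"
    elif id == 2:
        id = "Psycho"
else:
    id = conf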

Related

TypeError: unsupported operand type(s) for +: 'NoneType' and 'NoneType' for Eye blinking detection

I am having a problem with "eye blinking detection" using Python, OpenCV, and dlib. I am using a Jupyter notebook.
The following code uses the shape_predictor_68_face_landmarks.dat model, which plots 68 predefined points on a face.
import cv2
#import numpy as np
import dlib
from math import hypot

cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("C:\\Users\\Asirajdin\\dev\\shape_predictor_68_face_landmarks\\shape_predictor_68_face_landmarks.dat")

def midpoint(p1, p2):
    return int((p1.x + p2.x)/2), int((p1.y + p2.y)/2)

font = cv2.FONT_HERSHEY_SIMPLEX

def get_blinking_ratio(eye_points, facial_landmarks):
    left_point = (facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)
    right_point = (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)
    centre_top = midpoint(facial_landmarks.part(eye_points[1]), facial_landmarks.part(eye_points[2]))
    centre_bottom = midpoint(facial_landmarks.part(eye_points[5]), facial_landmarks.part(eye_points[4]))

hor_line = cv2.line(frame, left_point, right_point, (0, 255, 0), 2)
ver_line = cv2.line(frame, centre_top, centre_bottom, (0, 255, 0), 2)
hor_line_length = hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))
ver_line_length = hypot((centre_top[0] - centre_bottom[0]), (centre_top[1] - centre_bottom[1]))
ratio = hor_line_length/ver_line_length

while(True):
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    for face in faces:
        #x, y = face.left(), face.top()
        #x1, y1 = face.right(), face.bottom()
        #cv2.rectangle(frame, (x, y), (x1, y1), (0, 255, 0), 2)
        landmarks = predictor(gray, face)
        left_eye_ratio = get_blinking_ratio([36, 37, 38, 39, 40, 41], landmarks)
        right_eye_ratio = get_blinking_ratio([42, 43, 44, 45, 46, 47], landmarks)
        blinking_ratio = ((left_eye_ratio + right_eye_ratio) / 2)
        if blinking_ratio > 5.7:
            cv2.putText(frame, "BLINKING", (50, 150), font, 5, (255, 0, 0))
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1)
    if key == 27:
        break

# when everything is done then release the capture
cap.release()
cv2.destroyAllWindows()
Error:
TypeError Traceback (most recent call last)
<ipython-input-2-ca92d06b8ea8> in <module>
45 left_eye_ratio = get_blinking_ratio([36, 37, 38, 39, 40, 41], landmarks)
46 right_eye_ratio = get_blinking_ratio([42, 43, 44, 45, 46, 47], landmarks)
---> 47 blinking_ratio = ((left_eye_ratio + right_eye_ratio) / 2)
48
49 if blinking_ratio > 5.7:
TypeError: unsupported operand type(s) for +: 'NoneType' and 'NoneType'
I have found the solution to my post. I realised that hor_line_length and ver_line_length return NoneType, so I tried converting the ratio to an integer. I returned the ratio inside get_blinking_ratio as an int, i.e.
ratio = hor_line_length/ver_line_length
return int(ratio)
I am a novice in Python; the code is working now, but this might not be the best answer. Thanks.
Your function get_blinking_ratio does not return anything; in Python that means it implicitly returns None.
That's how the None values show up in that division.
After your function there are some lines that seem to deal with ratios. Did you intend for those to go inside the function? Then you need to indent them. They still don't contain a return statement, which is needed for a function to return anything.
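A minimal sketch of the fixed function, assuming the ratio lines were meant to live inside get_blinking_ratio (the cv2.line drawing calls are omitted here, since frame is not visible inside the function as written):

def get_blinking_ratio(eye_points, facial_landmarks):
    # horizontal extent of the eye: outer and inner corner landmarks
    left_point = (facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)
    right_point = (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)
    # vertical extent: midpoints of the top and bottom eyelid landmarks
    centre_top = midpoint(facial_landmarks.part(eye_points[1]), facial_landmarks.part(eye_points[2]))
    centre_bottom = midpoint(facial_landmarks.part(eye_points[5]), facial_landmarks.part(eye_points[4]))
    hor_line_length = hypot(left_point[0] - right_point[0], left_point[1] - right_point[1])
    ver_line_length = hypot(centre_top[0] - centre_bottom[0], centre_top[1] - centre_bottom[1])
    # the width-to-height ratio grows as the eyelids close
    return hor_line_length / ver_line_length  # the missing return statement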

I am trying to write Python code to get the perspective of an input image using OpenCV. How do I get rid of the error that comes up?

This code is about warping the given image and detecting the circular-shaped objects (checkers) in the image using the Hough transform.
Input files for my code below.
This is the input image used.
A JSON file having the required dimensions used to calculate the perspective:
{
  "canonical_board": {
    "tl_tr_br_bl": [
      [622, 85],
      [1477, 66],
      [1420, 835],
      [674, 837]
    ],
    "bar_width_to_checker_width": 0.716,
    "board_width_to_board_height": 1.03,
    "pip_length_to_board_height": 0.36
  }
}
My code
#import necessary packages
import cv2
import json
import numpy as np
from operator import itemgetter
from glob import glob

#load file
input_file = open('3913.jpg.info.json', 'r')
json_decode = json.load(input_file)

result = []
result.append(json_decode['canonical_board']['tl_tr_br_bl'])
result.append(json_decode['canonical_board']['bar_width_to_checker_width'])
result.append(json_decode['canonical_board']['board_width_to_board_height'])
result.append(json_decode['canonical_board']['pip_length_to_board_height'])
print("tl_tr_br_bl:", result[0])
print("bar_width_to_checker_width:", result[1])
print("board_width_to_board_height", result[2])
print("pip_length_to_board_height", result[3])

normal_img = cv2.imread('3913.jpg')
pts1 = np.float32([[454, 83], [1240, 79], [1424, 808], [275, 842]])
pts2 = np.array([[0.397],[0.986],[0.402]], dtype=np.float32)
M = cv2.getPerspectiveTransform(pts1.astype(np.float32), pts2)
dst = cv2.warpPerspective(normal_img, M, (1300, 800))

#perspective of the original image shown
cv2.imshow(dst)

#converting the image into grayscale
gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)

#locating the circles using hough transform
# detect circles in the image
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 100)

# ensure at least some circles were found
if circles is not None:
    circles = np.round(circles[0, :]).astype("int")
    no_of_circles = len(circles)
    # loop over the (x, y) coordinates and radius of the circles
    for (x, y, r) in circles:
        cv2.circle(output, (x, y), r, (0, 255, 0), 4)
    cv2.imshow("output", np.hstack([image, output]))
    cv2.waitKey(0)

#number of circles
print("number of circles detected-", no_of_circles)
Error I am getting
error Traceback (most recent call last)
<ipython-input-12-efcd2ec83d0c> in <module>
37 pts2 = np.array([[0.397],[0.986],[0.402]], dtype=np.float32)
38
---> 39 M = cv2.getPerspectiveTransform(pts1.astype(np.float32), pts2)
40
41 dst = cv2.warpPerspective(normal_img, M, (1300, 800))
error: OpenCV(4.1.2) /Users/travis/build/skvark/opencv-python/opencv/modules/imgproc/src/imgwarp.cpp:3391: error: (-215:Assertion failed) src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4 in function 'getPerspectiveTransform'
Your pts2 array is wrong: it needs to be four points, not three, and the points need to be two-dimensional, not one-dimensional.
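For example, a minimal sketch with illustrative destination corners (the real values should come from the board dimensions in the JSON):

# four 2-D destination points, giving the 4x2 float32 shape that the
# checkVector(2, CV_32F) == 4 assertion expects (corner values illustrative)
pts2 = np.float32([[0, 0], [1300, 0], [1300, 800], [0, 800]])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(normal_img, M, (1300, 800))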

error: (-215:Assertion failed) !ssize.empty() in function 'cv::resize' OpenCV

I have this old code that used to run fine in Python 2.7 a while ago. I just updated the code to run in Python 3.8, but when I try to execute it in Python 3.8 and OpenCV 3.4 I get a resize error and a warning (below)!
Here is the link to the two tif images that are required to run this code.
It's worth noting that both tif images are in the same folder as the Python code.
import cv2
import matplotlib.pyplot as plt
import numpy as np

## Code for C_preferred Mask and C_images ##
## There are three outputs to this code:
#"Block_order_C.PNG"
#"Out_img.PNG"
#"Output_C.txt"

## Change the image name here
filename_image = '2.tif'
filename_mask = '1.tif'

## OpenCV version checking
#print 'OpenCV version used', cv2.__version__

filename = open("Output_C.txt","w")
filename.write("Processing Image : " + str(filename_image) + '\n\n')

## Function to sort the contours. Parameters that you can tune: tolerance_factor and
## the size of the image. Here, I have used a fixed size of (800,800)
def get_contour_precedence(contour, cols):
    tolerance_factor = 10
    origin = cv2.boundingRect(contour)
    return ((origin[1] // tolerance_factor) * tolerance_factor) * cols + origin[0]

## Loading the colored mask, resizing it to (800,800) and converting it from RGB to HSV space, so that the color values are emphasized
p_mask_c = cv2.cvtColor(cv2.resize(cv2.imread(filename_mask),(800,800)),cv2.COLOR_RGB2HSV);

# Loading the original image
b_image_1 = cv2.resize(cv2.imread(filename_image),(800,800));

cv2.imshow("c_mask_preferred",p_mask_c)
cv2.waitKey();

# convert the target color to HSV. As the target mask portion to be considered is green, I have chosen the target color to be green
b = 0;
g = 255;
r = 0;

# Converting target color to HSV space
target_color = np.uint8([[[b, g, r]]])
target_color_hsv = cv2.cvtColor(target_color, cv2.COLOR_BGR2HSV)

# boundaries for Hue define the proper color boundaries; saturation and values can vary a lot
target_color_h = target_color_hsv[0,0,0]
tolerance = 20
lower_hsv = np.array([max(0, target_color_h - tolerance), 10, 10])
upper_hsv = np.array([min(179, target_color_h + tolerance), 250, 250])

# apply threshold on hsv image
mask = cv2.inRange(p_mask_c, lower_hsv, upper_hsv)
cv2.imshow("mask",mask)
cv2.waitKey()

# Eroding the binary mask, such that every white portion (grid) is separated from the others, to avoid overlapping and mixing of adjacent grids
b_mask = mask;
kernel = np.ones((5,5))
#kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
sharp = cv2.erode(b_mask,kernel, iterations=2)

# Finding all the grids (from binary image)
contours, hierarchy = cv2.findContours(sharp,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
print(' Number of contours', len(contours))

# Sorting contours
contours.sort(key=lambda x: get_contour_precedence(x, np.shape(b_mask)[0]))
#cv2.drawContours(b_image_1, contours, -1, (0,255,0), 1)

# Label variable for each grid/panel
label = 1;
b_image = b_image_1.copy();
temp = np.zeros(np.shape(b_image_1),np.uint8)
print(' size of temp',np.shape(temp), np.shape(b_image))
out_img = b_image_1.copy()

# Processing each contour/label one by one
for cnt in contours:
    cv2.drawContours(b_image_1,[cnt],0,(255,255,0), 1)
    ## Just to draw labels in the center of each grid
    ((x, y), r) = cv2.minEnclosingCircle(cnt)
    x = int(x)
    y = int(y)
    r = int(r)
    cv2.putText(b_image_1, "#{}".format(label), (int(x) - 10, int(y)),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    ##
    cv2.drawContours(temp,[cnt],0,(255,255,255), -1)
    #crop_img = np.bitwise_and(b_image,temp)
    r = cv2.boundingRect(cnt)
    crop_img = b_image[r[1]:r[1]+r[3], r[0]:r[0]+r[2]]
    mean = cv2.mean(crop_img);
    mean = np.array(mean).reshape(-1,1)
    print(' Mean color', mean, np.shape(mean))
    if mean[1] < 50:
        cv2.putText(out_img, "M", (int(x) - 10, int(y)),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 1)
        filename.write("Block number #"+ str(label)+ ' is : ' + 'Magenta'+'\n');
    else:
        cv2.putText(out_img, "G", (int(x) - 10, int(y)),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 1)
        filename.write("Block number #"+ str(label)+ ' is : ' +'Gray'+'\n');
    label = label+1;

cv2.imwrite("Block_order_C.PNG",b_image_1)
cv2.imwrite("Out_img.PNG",out_img)
filename.close()

cv2.imshow("preferred",b_image_1)
cv2.waitKey()
Error
[ WARN:0] global C:\projects\opencv-python\opencv\modules\imgcodecs\src\grfmt_tiff.cpp (449) cv::TiffDecoder::readData OpenCV TIFF: TIFFRGBAImageOK: Sorry, can not handle images with IEEE floating-point samples
Traceback (most recent call last):
File "Processing_C_preferred.py", line 32, in
p_mask_c = cv2.cvtColor(cv2.resize(cv2.imread(filename_mask),(800,800)),cv2.COLOR_RGB2HSV);
cv2.error: OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\imgproc\src\resize.cpp:4045: error: (-215:Assertion failed) !ssize.empty() in function 'cv::resize'
When you read in the image pass the cv::IMREAD_ANYDEPTH = 2 parameter as the second parameter in cv2.imread().
Changing your lines to
p_mask_c = cv2.cvtColor(cv2.resize(cv2.imread(filename_mask, 2),(800,800)),cv2.COLOR_RGB2HSV);
and
b_image_1 = cv2.resize(cv2.imread(filename_image, 2),(800,800));
removes the resize error you're seeing.
But you then get another error when converting the color, since your TIFF image apparently has only one channel, so cv2.COLOR_RGB2HSV won't work.
You could also use multiple flags like cv::IMREAD_COLOR = 1,
p_mask_c = cv2.cvtColor(cv2.resize(cv2.imread(filename_mask, 2 | 1),(800,800)),cv2.COLOR_BGR2HSV);
to read in a color image. But you get a different error. Perhaps you understand this image better than I do and can solve the problem from here on out.
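As a side note, the !ssize.empty() assertion fires because cv2.imread() returns None when it cannot decode a file, and that None then reaches cv2.resize. A small guard (a sketch) surfaces the real failure:

# sketch: fail fast if imread could not decode the file, instead of
# letting None propagate into cv2.resize
img = cv2.imread(filename_mask, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
if img is None:
    raise IOError("cv2.imread could not decode " + filename_mask)
p_mask_c = cv2.cvtColor(cv2.resize(img, (800, 800)), cv2.COLOR_BGR2HSV)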

ValueError: could not broadcast input array from shape (700,227,3) into shape (0,227,3)

Please help me rectify the errors. This is OpenCV feature extraction code.
from __future__ import division
import numpy as np
import cv2

ESC = 27
camera = cv2.VideoCapture(0)
orb = cv2.ORB()
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
imgTrainColor = cv2.imread('train.jpg')
imgTrainGray = cv2.cvtColor(imgTrainColor, cv2.COLOR_BGR2GRAY)
kpTrain = orb.detect(imgTrainGray, None)
kpTrain, desTrain = orb.compute(imgTrainGray, kpTrain)
firsttime = True

while True:
    ret, imgCamColor = camera.read()
    imgCamGray = cv2.cvtColor(imgCamColor, cv2.COLOR_BGR2GRAY)
    kpCam = orb.detect(imgCamGray, None)
    kpCam, desCam = orb.compute(imgCamGray, kpCam)
    matches = bf.match(desCam, desTrain)
    dist = [m.distance for m in matches]
    thres_dist = (sum(dist) / len(dist)) * 0.5
    matches = [m for m in matches if m.distance < thres_dist]

    if firsttime == True:
        h1, w1 = imgCamColor.shape[:2]
        h2, w2 = imgTrainColor.shape[:2]
        nWidth = w1 + w2
        nHeight = max(h1, h2)
        hdif = (h1 - h2) / 2
        firsttime = False

    result = np.zeros((nHeight, nWidth, 3), np.uint8)
    result[hdif:hdif+h2, :w2] = imgTrainColor
    result[:h1, w2:w1+w2] = imgCamColor

    for i in range(len(matches)):
        pt_a = (int(kpTrain[matches[i].trainIdx].pt[0]), int(kpTrain[matches[i].trainIdx].pt[1] + hdif))
        pt_b = (int(kpCam[matches[i].queryIdx].pt[0] + w2), int(kpCam[matches[i].queryIdx].pt[1]))
        cv2.line(result, pt_a, pt_b, (255, 0, 0))

    cv2.imshow('Camara', result)
    key = cv2.waitKey(30)
    if key == ESC:
        break

cv2.destroyAllWindows()
camera.release()
ERRORS APPEARING:
Traceback (most recent call last):
File "sift.py", line 39, in
result[hdif:hdif+h2, :w2] = imgTrainColor
ValueError: could not broadcast input array from shape (700,227,3) into shape (0,227,3)
Without digging through your code in detail:
result[hdif:hdif+h2, :w2] = imgTrainColor
... from shape (700,227,3) into shape (0,227,3)
I deduce that imgTrainColor is 3-d with shape (700,227,3).
result must have a (3,) last dimension; the :w2 must be slicing 227 values. But hdif:hdif+h2 is slicing 0 rows, probably because h2 is 0.
In other words, you are trying to put the imgTrainColor values into a block of result that is too small.
Can I leave it to you to figure out why h2 is wrong? Another possibility is that hdif is too large (>700). You may need to print those indexing values just before this error.
Oh, and clean up the indentation.
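To act on that suggestion, a quick debugging sketch just before the failing line could be:

# sketch: inspect the slice bounds right before the broadcast that fails;
# one possibility consistent with the (0, 227, 3) shape is a negative hdif
# (the train image being taller than the camera frame, h2 > h1), which
# turns hdif:hdif+h2 into an empty slice
print('hdif:', hdif, 'h2:', h2, 'result height:', result.shape[0])
result[hdif:hdif+h2, :w2] = imgTrainColor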

'CV_LOAD_IMAGE_GRAYSCALE' is not defined (Python)

I'm trying to run a face recognition program using OpenCV and Python.
I found this code here on Stack Overflow, but the main problem is an error which says:
Traceback (most recent call last):
File "/Users/n1/Desktop/FaceDetection/face.py", line 8, in <module>
gray = imread(fname, CV_LOAD_IMAGE_GRAYSCALE )
NameError: name 'CV_LOAD_IMAGE_GRAYSCALE' is not defined
The code is this one :
from cv2 import *
import numpy as np

face_cascade = CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = CascadeClassifier('haarcascade_eye.xml')

fname = '123.jpg'
img = imread(fname)
gray = imread(fname, CV_LOAD_IMAGE_GRAYSCALE)
rows, cols = gray.shape
gray = np.array(gray, dtype='uint8')

faces = face_cascade.detectMultiScale(gray, 1.3, 5, 0)
print('faces=', faces)
for (x, y, w, h) in faces:
    rectangle(img, (x, y), ((x+w), (x+h)), (255, 0, 0), 2)
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for (ex, ey, ew, eh) in eyes:
        rectangle(roi_color, (x, y), ((x+w), (y+h)), (50, 50, 50), 3)
        imshow('eyes=%s' % (eyes,), roi_color)

imshow("img", img)
waitKey(0)
destroyAllWindows()
>>> import cv2
>>> help(cv2)
...
IMREAD_ANYCOLOR = 4
IMREAD_ANYDEPTH = 2
IMREAD_COLOR = 1
IMREAD_GRAYSCALE = 0 #that will be it ;)
IMREAD_LOAD_GDAL = 8
IMREAD_UNCHANGED = -1
...
VERSION
3.0.0-dev
( CV_LOAD_IMAGE_GRAYSCALE is from the outdated [and now removed] cv api )
In OpenCV 3.1 for C++ you have to use cv::ImreadModes::IMREAD_GRAYSCALE, which is located in <opencv2/imgcodecs.hpp>.
Simply change "CV_LOAD_IMAGE_GRAYSCALE"
to "IMREAD_GRAYSCALE"
and it will work.
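In the Python code above, that is a one-line change (a sketch):

gray = imread(fname, IMREAD_GRAYSCALE)  # IMREAD_GRAYSCALE == 0 replaces the removed CV_LOAD_IMAGE_GRAYSCALE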
