As mentioned in the title, I'm trying to use ROS Melodic with Python 3. The first error was caused by cv_bridge and has been fixed. Now I'm getting this error:
[ERROR] [1673464074.204372, 2767.036000]: bad callback: <function im_callback at 0x7f889d1aed90>
cv2.error: OpenCV(4.4.0) /tmp/pip-req-build-civioau0/opencv/modules/dnn/src/tensorflow/tf_importer.cpp:586:
error: (-2:Unspecified error) Const input blob for weights not found in function 'getConstBlob'
I checked and could not find anything about this error.
Here is my code that I'm trying to rosrun:
#! /usr/bin/env python3
import cv2
import numpy as np
import rospy
import sensor_msgs.msg as sensor
import cv_bridge
rostopic = "/iris/camera/rgb/image_raw"
rosmsg = sensor.Image
configPath = "/home/irene/catkin_ws/src/beginner_tutorials/scripts/model/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt" #file.pbtxt
modelPath = "/home/irene/catkin_ws/src/beginner_tutorials/scripts/model/frozen_inference_graph.pb" #file.pb
classesPath = "/home/irene/catkin_ws/src/beginner_tutorials/scripts/model/coco.names" #file.names
bridge = cv_bridge.CvBridge()
def im_callback(rosmsg):
    global configPath, modelPath, classesPath, bridge
    img = bridge.imgmsg_to_cv2(rosmsg, "bgr8")

    net = cv2.dnn_DetectionModel(modelPath, configPath)
    net.setInputSize(320, 320)
    net.setInputScale(1/127.5)
    net.setInputMean((127.5, 127.5, 127.5))
    net.setInputSwapRB(True)

    with open(classesPath, "r") as file:
        classesList = file.read().splitlines()

    classesLabelIDs, confidences, body_rects = net.detect(img, confThreshold=0.6)

    body_rects = list(body_rects)
    confidences = list(np.array(confidences).reshape(1, -1)[0])
    confidences = list(map(float, confidences))

    bboxsIDx = cv2.dnn.NMSBoxes(body_rects, confidences, score_threshold=0.5, nms_threshold=0.2)

    if len(bboxsIDx) != 0:
        for _, bID in enumerate(bboxsIDx):
            classLabelID = np.squeeze(classesLabelIDs[np.squeeze(bID)])
            classLabel = classesList[classLabelID]
            if classLabel == "person":
                body_rect = body_rects[np.squeeze(bID)]
                classConfidence = confidences[np.squeeze(bID)]
                display_text = "{} - {:.1}".format(classLabel, classConfidence)
                x, y, w, h = body_rect
                cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 1)
                cv2.line(img, (x, y), (x + int(w*.3), y), (0, 0, 255), 3)
                cv2.line(img, (x, y), (x, y + int(h*.3)), (0, 0, 255), 3)
                cv2.line(img, (x+w, y), (x + w - int(w*.3), y), (0, 0, 255), 3)
                cv2.line(img, (x+w, y), (x+w, y + int(h*.3)), (0, 0, 255), 3)
                cv2.line(img, (x+w, y+h), (x + w - int(w*.3), y + h), (0, 0, 255), 3)
                cv2.line(img, (x+w, y+h), (x+w, y + h - int(h*.3)), (0, 0, 255), 3)
                cv2.line(img, (x, y+h), (x + int(w*.3), y+h), (0, 0, 255), 3)
                cv2.line(img, (x, y+h), (x, y + h - int(h*.3)), (0, 0, 255), 3)
                cv2.putText(img, display_text, (x, y-8), cv2.FONT_HERSHEY_COMPLEX, .4, (255, 255, 255), 1)

    cv2.imshow("Image", img)
    cv2.waitKey(1)

def main():
    global rosmsg, rostopic
    rospy.init_node("webcam_node", anonymous=True)
    rospy.Subscriber(rostopic, rosmsg, im_callback)
    rospy.spin()

if __name__ == "__main__":
    main()
As you've probably gathered, the error comes from trying to use Python 3. Melodic targets Python 2.7 exclusively, and making it run with Python 3 is strongly discouraged for exactly the reasons you're seeing. If you really want to use Python 3 packages and dependencies in your project, you should instead be running the Noetic distribution of ROS.
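If you want to double-check what your environment is actually running before debugging further, a quick sanity-check script along these lines (a minimal sketch, assuming a sourced ROS environment so that ROS_DISTRO is set) makes the mismatch obvious:

#!/usr/bin/env python3
# Sanity check: print the active ROS distro and the Python interpreter version.
# Assumes a ROS setup.bash has been sourced so ROS_DISTRO is in the environment.
import os
import sys

distro = os.environ.get("ROS_DISTRO", "unknown")
print("ROS distro :", distro)
print("Python     :", sys.version.split()[0])

if distro == "melodic" and sys.version_info[0] == 3:
    print("Warning: Melodic targets Python 2.7; expect breakage (e.g. cv_bridge, dnn bindings).")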
Related
I want to extract two digits and an operator (e.g. 14 + 23) from an image, but the script I wrote doesn't work:
import cv2 as cv
import pytesseract
img = cv.imread('VerificationImage.jpg')
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
print(pytesseract.image_to_string(img))
conf = r'--oem 3 --psm 11 outputbase digits'
boxes = pytesseract.image_to_data(img, config=conf)
for x, b in enumerate(boxes.splitlines()):
    if x != 0:
        b = b.split()
        print(b)
        if len(b) == 12:
            x, y, w, h = int(b[6]), int(b[7]), int(b[8]), int(b[9])
            cv.rectangle(img, (x, y), (x+w, y+h), (0, 120, 0), 2)
cv.imshow("display", img)
cv.waitKey()
The sample image is like this:
This project is face recognition with a barcode scanner. It needs to detect a face before it can scan the barcode. The flow works, but once it detects someone's face the imshow window stops responding and the webcam feed freezes. I want the webcam to keep streaming while my code is processing. How can I do that?
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise IOError("Cannot open webcam")
temp = ""
while True:
    success, eImgs = cap.read()
    if success:
        font = cv2.FONT_HERSHEY_PLAIN
        datet = str(datetime.now())
        frame = cv2.putText(eImgs, datet, (10, 50), font, 1, (0, 0, 128), 1, cv2.LINE_AA)
        # eImgs_v1 = cv2.resize(eImgs, (0, 0), None, 0.25, 0.25)
        eImgs_v1 = cv2.cvtColor(eImgs, cv2.COLOR_BGR2RGB)
        facesWebcam = face_recognition.face_locations(eImgs_v1)
        encodesWebcam = face_recognition.face_encodings(eImgs_v1, facesWebcam)
        for encodeKnown_v2, faceLoc in zip(encodesWebcam, facesWebcam):
            facesCompared = face_recognition.compare_faces(encodeKnown, encodeKnown_v2)
            faceDistance = face_recognition.face_distance(encodeKnown, encodeKnown_v2)
            faceIndex = np.argmin(faceDistance)
            if facesCompared[faceIndex]:
                employeeName = ListNames[faceIndex]
                y = employeeName
                if temp == "" or temp != name:
                    print(name)
                    temp = name
                if y:
                    print(y)
                    print("AUTHORIZED")
                    time.sleep(1)
                    # Arduino and Python connection
                    arduino = serial.Serial('COM9', 115200, timeout=.1)
                    time.sleep(1)
                    print("The system is ready!")
                    while True:
                        barcode = arduino.readline()[:-2]
                        strbarcode = barcode.decode('utf-8')
                        if strbarcode:
                            x = strbarcode
                            print(x)
                            if y == x:
                                print('Have a nice day!')
                                time.sleep(3)
                                print("Next Employee please!")
                            else:
                                print('This is not yours!')
            else:
                print("UNAUTHORIZED")
            p1, p2, p3, p4 = faceLoc
            cv2.rectangle(eImgs, (p1, p2), (p3, p4), (0, 255, 0), 2)
            cv2.putText(eImgs, y, (p1, p3), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
        cv2.imshow('EMPLOYEE', eImgs)
        cv2.waitKey(1)
cap.release()
cv2.destroyAllWindows()
I recommend using deepface. Its stream function applies face recognition with several state-of-the-art face recognition models.
#!pip install deepface
from deepface import DeepFace

models = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace', 'DeepID', 'Dlib', 'ArcFace']

DeepFace.stream(db_path = 'C:/my_db',
                model_name = models[0],
                enable_face_analysis = False  # to disable age, gender, emotion prediction
)
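If you prefer to keep your own OpenCV loop rather than switching to DeepFace, a common way to stop the window from freezing is to move the heavy recognition into a background thread while the main loop only grabs and displays frames. Below is a minimal sketch of that idea, not your full barcode logic; known_encodings and known_names are hypothetical placeholders for your own database.

import threading
import time

import cv2
import face_recognition

# Hypothetical placeholders -- load these from your own image database.
known_encodings = []   # list of known face encodings
known_names = []       # names matching known_encodings

latest_result = "..."  # last recognition result, shared between threads
frame_to_process = None
lock = threading.Lock()

def recognition_worker():
    """Run face recognition on the newest frame without blocking the display loop."""
    global latest_result, frame_to_process
    while True:
        with lock:
            frame = None if frame_to_process is None else frame_to_process.copy()
        if frame is None:
            time.sleep(0.05)
            continue
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        locations = face_recognition.face_locations(rgb)
        encodings = face_recognition.face_encodings(rgb, locations)
        names = []
        for enc in encodings:
            matches = face_recognition.compare_faces(known_encodings, enc)
            names.append(known_names[matches.index(True)] if True in matches else "UNKNOWN")
        with lock:
            latest_result = ", ".join(names) if names else "no face"

threading.Thread(target=recognition_worker, daemon=True).start()

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    with lock:
        frame_to_process = frame.copy()   # hand the newest frame to the worker
        text = latest_result              # read the most recent finished result
    cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
    cv2.imshow("EMPLOYEE", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

The display loop never waits on recognition; it just shows the last result that the worker managed to finish, so the window stays responsive.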
These are my routes and some functions:
from flask import Flask, render_template, Response
import cv2
import time
from sys import stdout
from flask_socketio import SocketIO
import math
import numpy as np
import logging
import os
from camera import VideoCamera
app = Flask(__name__)
app.logger.addHandler(logging.StreamHandler(stdout))
app.config['SECRET_KEY'] = 'b13ce0c6768bb0b280bab13ceb13ce0cde280ba0c676dfde280ba245676dfde280ba0c676dfde280ba245'
app.config['DEBUG'] = True
socketio = SocketIO(app)
@socketio.on('connect', namespace='/test')
def test_connect():
    app.logger.info("client connected")

@app.route('/', methods=['GET', 'POST'])
def index():
    """Video streaming home page."""
    return render_template('index.html')

def gen(camera):
    while True:
        data = camera.get_frame()
        frame = data[0]
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()), mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/golden_ratio_calculating', methods=['GET', 'POST'])
def calculate():
    ratio = main()
    return render_template('golden_calc_page.html', ratio123=ratio)

if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
    socketio.run(app, port=port)
In the code below I have used WebcamVideoStream(src = 0).start(). With src = 0 it works fine on the local server, but when I deployed it on Heroku it does not open the webcam (i.e. it is not detecting a webcam on the Heroku server). Check the page here: https://golden-ratio-calculator.herokuapp.com/.
import cv2
import pickle
from imutils.video import WebcamVideoStream
# import face_recognition
import time
import math
import random
import numpy as np
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0.
        self.stream = WebcamVideoStream(src = 0).start()

    def __del__(self):
        self.stream.stop()

    def get_frame(self):
        image = self.stream.read()
        startTime = time.time()
        top2chin = []
        left2right = []
        top2pupil = []
        pupil2lip = []
        noseWidth = []
        nose2lips = []
        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
        eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
        righteye_cascade = cv2.CascadeClassifier('haarcascade_righteye.xml')
        lefteye_cascade = cv2.CascadeClassifier('haarcascade_leftteye.xml')
        smile_cascade = cv2.CascadeClassifier('haarcascade_mouth.xml')
        nose_cascade = cv2.CascadeClassifier('haarcascade_nose.xml')
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        height, width, channels = image.shape
        for (x, y, w, h) in faces:
            # print("found a face")
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
            roi_gray = gray[y:y+h, x:x+h]
            roi_color = image[y:y+h, x:x+h]
            eyes = eye_cascade.detectMultiScale(roi_gray, 2.5, 5)
            smiles = smile_cascade.detectMultiScale(roi_gray, 3.4, 5)
            noses = nose_cascade.detectMultiScale(roi_gray, 1.3, 5)
            right_eyes = righteye_cascade.detectMultiScale(roi_gray, 2.5, 5)
            ex, ey, ew, eh = 0, 0, 0, 0
            sx, sy, sw, sh = 0, 0, 0, 0
            nx, ny, nw, nh = 0, 0, 0, 0
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 1)
            for (sx, sy, sw, sh) in smiles:
                cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (0, 0, 255), 1)
            for (nx, ny, nw, nh) in noses:
                cv2.rectangle(roi_color, (nx, ny), (nx+nw, ny+nh), (255, 0, 255), 1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(image, "Hello User", (math.floor(width / 4), math.floor(height / 12)), font, 0.7, (255, 255, 255), 1, cv2.LINE_AA)
        ret, jpeg = cv2.imencode('.jpg', image)
        data = []
        data.append(jpeg.tobytes())
        # data.append(name)
        return data
I have tried almost everything. Please help me solve this.
Have you had a look at the Heroku docs regarding camera interfaces? There is an add-on called CameraTag:
Heroku Cameratag
Also, you might find self-hosting your Flask app on an Apache2 server a more successful solution. There are many tutorials, and the process is not difficult.
Heroku is great, but in my experience with face_recognition specifically, if you don't pay for upgrades you will run into issues exceeding the ~550 MB memory limit.
This is a great tutorial for self-hosting a Flask app:
Deploy Flask to Apache Server
Furthermore, it may be that Heroku cannot access a local camera peripheral at all, or that the device is no longer at index 0:
# Using OpenCV to capture from device 0.
self.stream = WebcamVideoStream(src = 0).start()
Have you tried modifying the src?
This may also be useful for your application.
Stream webcam to html OpenCV
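On the "modifying the src" point, a minimal sketch (assuming you keep imutils' WebcamVideoStream) is to read the device index from an environment variable so it can be changed per deployment; CAMERA_SRC is a hypothetical variable name. Keep in mind that a Heroku dyno has no attached camera at any index, so a server-side capture will still fail there and the frames ultimately have to come from the client's browser.

import os
from imutils.video import WebcamVideoStream

class VideoCamera(object):
    def __init__(self):
        # Device index is configurable per environment; CAMERA_SRC is a
        # hypothetical variable name, defaulting to device 0 locally.
        src = int(os.environ.get("CAMERA_SRC", 0))
        self.stream = WebcamVideoStream(src=src).start()

    def __del__(self):
        self.stream.stop()

    def get_frame(self):
        # Return the raw frame; encode/annotate it in the caller as before.
        return self.stream.read()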
I have a hundred entries in a CSV file.
Physics,Maths,Status_class0or1
30,40,0
90,70,1
Using the above data I am trying to build a logistic (binary) classifier.
Please advise me where I am going wrong. Why am I getting theta as a 3x3 matrix (9 values, whereas it should be only 3)?
Here is the code:
Importing the libraries:
import numpy as np
import pandas as pd
from sklearn import preprocessing
Reading the data from the CSV file:
df = pd.read_csv("LogisticRegressionFirstBinaryClassifier.csv", header=None)
df.columns = ["Maths", "Physics", "AdmissionStatus"]
X = np.array(df[["Maths", "Physics"]])
y = np.array(df[["AdmissionStatus"]])
X = preprocessing.normalize(X)
X = np.c_[np.ones(X.shape[0]), X]
theta = np.ones((X.shape[1], 1))
print(X.shape) # (100, 3)
print(y.shape) # (100, 1)
print(theta.shape) # (3, 1)
calc_z to calculate the dot product of X and theta:
def calc_z(X, theta):
    return np.dot(X, theta)
Sigmoid function:
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
Cost function:
def cost_function(X, y, theta):
    z = calc_z(X, theta)
    h = sigmoid(z)
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
print("cost_function =", cost_function(X, y, theta))
def derivativeofcostfunction(X, y, theta):
    z = calc_z(X, theta)
    h = sigmoid(z)
    calculation = np.dot((h - y).T, X)
    return calculation
print("derivativeofcostfunction=", derivativeofcostfunction(X, y, theta))
def grad_desc(X, y, theta, lr=.001, converge_change=.001):
    cost = cost_function(X, y, theta)
    change_cost = 1
    num_iter = 1
    while change_cost > converge_change:
        old_cost = cost
        print(theta)
        print(derivativeofcostfunction(X, y, theta))
        theta = theta - lr * derivativeofcostfunction(X, y, theta)
        cost = cost_function(X, y, theta)
        change_cost = old_cost - cost
        num_iter += 1
    return theta, num_iter
Here is the output:
[[ 0.4185146 -0.56877556 0.63999433]
[15.39722864 9.73995197 11.07882445]
[12.77277463 7.93485324 9.24909626]]
[[0.33944777 0.58199037 0.52493407]
[0.02106587 0.36300629 0.30297278]
[0.07040604 0.3969297 0.33737757]]
[[-0.05856159 -0.89826735 0.30849185]
[15.18035041 9.59004868 10.92827046]
[12.4804775 7.73302024 9.04599788]]
[[0.33950634 0.58288863 0.52462558]
[0.00588552 0.35341624 0.29204451]
[0.05792556 0.38919668 0.32833157]]
[[-5.17526527e-01 -1.21534937e+00 -1.03387571e-02]
[ 1.49729502e+01 9.44663458e+00 1.07843504e+01]
[ 1.21978140e+01 7.53778010e+00 8.84964495e+00]]
(array([[ 0.34002386, 0.58410398, 0.52463592],
[-0.00908743, 0.34396961, 0.28126016],
[ 0.04572775, 0.3816589 , 0.31948193]]), 46)
I changed the code, just adding a transpose when returning the matrix, and it fixed my issue.
def derivativeofcostfunction(X, y, theta):
    z = calc_z(X, theta)
    h = sigmoid(z)
    calculation = np.dot((h - y).T, X)
    return calculation.T
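For anyone wondering why the untransposed version produced a 3x3 theta: it is broadcasting. (h - y).T.dot(X) has shape (1, 3), and subtracting a (1, 3) array from a (3, 1) theta broadcasts the result to (3, 3). A small standalone shape check (with random data in place of the CSV) illustrates it:

import numpy as np

X = np.c_[np.ones(100), np.random.rand(100, 2)]   # (100, 3) design matrix with bias column
y = np.random.randint(0, 2, (100, 1))             # (100, 1) binary labels
theta = np.ones((X.shape[1], 1))                  # (3, 1) parameters

h = 1 / (1 + np.exp(-X.dot(theta)))               # (100, 1) predictions
grad = (h - y).T.dot(X)                           # (1, 3)  -- wrong orientation

print((theta - 0.001 * grad).shape)               # (3, 3): broadcasting blows up theta
print((theta - 0.001 * grad.T).shape)             # (3, 1): transposed gradient keeps the shape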
I'm trying to use the method cv2.estimateAffine3D, but without success. Here is my code sample:
import numpy as np
import cv2
shape = (1, 4, 3)
source = np.zeros(shape, np.float32)
# [x, y, z]
source[0][0] = [857, 120, 854]
source[0][1] = [254, 120, 855]
source[0][2] = [256, 120, 255]
source[0][3] = [858, 120, 255]
target = source * 10
retval, M, inliers = cv2.estimateAffine3D(source, target)
When I try to run this sample, I obtain the same error as this other post here.
I'm using OpenCV 2.4.3 and Python 2.7.3
Please help me!
This is a known bug that is fixed in 2.4.4.
http://code.opencv.org/issues/2375
If you just need rigid (rotation + translation) alignment, here's the standard method:
def get_rigid(src, dst):  # Assumes both are Nx3 matrices
    src_mean = src.mean(0)
    dst_mean = dst.mean(0)
    # Compute covariance
    H = reduce(lambda s, (a, b): s + np.outer(a, b), zip(src - src_mean, dst - dst_mean), np.zeros((3, 3)))
    u, s, v = np.linalg.svd(H)
    R = v.T.dot(u.T)                    # Rotation
    T = - R.dot(src_mean) + dst_mean    # Translation
    return np.hstack((R, T[:, np.newaxis]))
For Python 3, change the covariance line in the previous post to H = reduce(lambda s, a: s + np.outer(a[0], a[1]), zip(src - src_mean, dst - dst_mean), np.zeros((3,3))). Can't comment because of reputation score.
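For reference, here is a sketch of the same rigid-alignment method written for Python 3 (note that reduce would also need to be imported from functools in Python 3; below the covariance is instead written as a single matrix product, which is equivalent to the summed outer products):

import numpy as np

def get_rigid(src, dst):
    """Best-fit rotation + translation mapping src onto dst; both are Nx3 arrays."""
    src_mean = src.mean(0)
    dst_mean = dst.mean(0)
    # Covariance as one matrix product (same as summing the per-point outer products)
    H = (src - src_mean).T.dot(dst - dst_mean)
    u, s, vt = np.linalg.svd(H)
    R = vt.T.dot(u.T)                   # Rotation
    T = -R.dot(src_mean) + dst_mean     # Translation
    return np.hstack((R, T[:, np.newaxis]))

# Usage: recovering a pure translation from non-coplanar points
src = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
dst = src + np.array([10.0, -5.0, 3.0])
print(get_rigid(src, dst))  # ~identity rotation with [10, -5, 3] in the last column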