These are my routes and some functions:
from flask import Flask, render_template, Response
import cv2
import time
from sys import stdout
from flask_socketio import SocketIO
import math
import numpy as np
import logging
import os
from camera import VideoCamera
app = Flask(__name__)
app.logger.addHandler(logging.StreamHandler(stdout))
app.config['SECRET_KEY'] = 'b13ce0c6768bb0b280bab13ceb13ce0cde280ba0c676dfde280ba245676dfde280ba0c676dfde280ba245'
app.config['DEBUG'] = True
socketio = SocketIO(app)
@socketio.on('connect', namespace='/test')
def test_connect():
    app.logger.info("client connected")

@app.route('/', methods=['GET', 'POST'])
def index():
    """Video streaming home page."""
    return render_template('index.html')

def gen(camera):
    while True:
        data = camera.get_frame()
        frame = data[0]
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()), mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/golden_ratio_calculating', methods=['GET', 'POST'])
def calculate():
    ratio = main()
    return render_template('golden_calc_page.html', ratio123=ratio)

if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
    socketio.run(app, port=port)
In the code below I have used WebcamVideoStream(src=0).start(). With src=0 this works fine on a local server, but when I deployed it to Heroku it does not open the webcam (i.e. it does not detect a webcam on the Heroku server). Check the deployed page here: https://golden-ratio-calculator.herokuapp.com/.
import cv2
import pickle
from imutils.video import WebcamVideoStream
# import face_recognition
import time
import math
import random
import numpy as np
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0.
        self.stream = WebcamVideoStream(src=0).start()

    def __del__(self):
        self.stream.stop()

    def get_frame(self):
        image = self.stream.read()
        startTime = time.time()
        top2chin = []
        left2right = []
        top2pupil = []
        pupil2lip = []
        noseWidth = []
        nose2lips = []
        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
        eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
        righteye_cascade = cv2.CascadeClassifier('haarcascade_righteye.xml')
        lefteye_cascade = cv2.CascadeClassifier('haarcascade_lefteye.xml')
        smile_cascade = cv2.CascadeClassifier('haarcascade_mouth.xml')
        nose_cascade = cv2.CascadeClassifier('haarcascade_nose.xml')
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        height, width, channels = image.shape
        for (x, y, w, h) in faces:
            # print("found a face")
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = image[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_gray, 2.5, 5)
            smiles = smile_cascade.detectMultiScale(roi_gray, 3.4, 5)
            noses = nose_cascade.detectMultiScale(roi_gray, 1.3, 5)
            right_eyes = righteye_cascade.detectMultiScale(roi_gray, 2.5, 5)
            ex, ey, ew, eh = 0, 0, 0, 0
            sx, sy, sw, sh = 0, 0, 0, 0
            nx, ny, nw, nh = 0, 0, 0, 0
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 1)
            for (sx, sy, sw, sh) in smiles:
                cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (0, 0, 255), 1)
            for (nx, ny, nw, nh) in noses:
                cv2.rectangle(roi_color, (nx, ny), (nx+nw, ny+nh), (255, 0, 255), 1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(image, "Hello User", (math.floor(width / 4), math.floor(height / 12)), font, 0.7, (255, 255, 255), 1, cv2.LINE_AA)
        ret, jpeg = cv2.imencode('.jpg', image)
        data = []
        data.append(jpeg.tobytes())
        # data.append(name)
        return data
Please, I have tried almost everything. Please help me solve this.
Have you had a look at the Heroku docs regarding camera interfaces? There is an add-on, CameraTag:
Heroku Cameratag
Also, you might find self-hosting your Flask app on an Apache2 server a more successful solution. There are many tutorials, and the process is not difficult.
Heroku is great, but in my experience with face_recognition specifically, if you don't pay for upgrades you will run into issues with the memory limit (<550 MB).
This link is a great tutorial for self-hosting a Flask app.
Deploy Flask to Apache Server
Furthermore, it may be that Heroku cannot access a local camera peripheral at all, or that the device is no longer at index [0]:
# Using OpenCV to capture from device 0.
self.stream = WebcamVideoStream(src = 0).start()
Have you tried modifying the src?
This may also be useful for your application.
Stream webcam to html OpenCV
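Since a Heroku dyno is a headless server with no physical webcam, cv2.VideoCapture/WebcamVideoStream can only ever see cameras attached to the server, never the visitor's. One way around this (a minimal sketch, not your existing code) is to capture the webcam in the browser with getUserMedia, send each frame to Flask over the Socket.IO connection you already have, and decode it server-side; the 'frame' event name and the handle_frame handler below are assumptions for illustration only:
# Sketch: receive base64-encoded JPEG frames emitted by the browser over Socket.IO,
# decode them with OpenCV, and emit back a processed frame. Event names are hypothetical.
import base64
import numpy as np
from flask_socketio import emit

@socketio.on('frame', namespace='/test')
def handle_frame(data_url):
    # data_url looks like "data:image/jpeg;base64,...."
    encoded = data_url.split(',', 1)[1]
    img_bytes = base64.b64decode(encoded)
    frame = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_COLOR)
    # ... run the existing face / golden-ratio processing on `frame` here ...
    ret, jpeg = cv2.imencode('.jpg', frame)
    emit('processed', base64.b64encode(jpeg.tobytes()).decode('ascii'))
The browser-side part (getUserMedia plus a canvas to grab JPEG data URLs) is what the "Stream webcam to html OpenCV" link above covers.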
Related question:
As mentioned in the title, I'm trying to use ROS Melodic with Python 3. The first error popped up because of cv_bridge and has been fixed. Now I'm getting this error:
[ERROR] [1673464074.204372, 2767.036000]: bad callback: <function im_callback at 0x7f889d1aed90>
cv2.error: OpenCV(4.4.0) /tmp/pip-req-build-civioau0/opencv/modules/dnn/src/tensorflow/tf_importer.cpp:586:
error: (-2:Unspecified error) Const input blob for weights not found in function 'getConstBlob'
I checked and could not find anything about this error.
Here is my code that I'm trying to rosrun:
#! /usr/bin/env python3
import cv2
import numpy as np
import rospy
import sensor_msgs.msg as sensor
import cv_bridge

rostopic = "/iris/camera/rgb/image_raw"
rosmsg = sensor.Image
configPath = "/home/irene/catkin_ws/src/beginner_tutorials/scripts/model/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"  # file.pbtxt
modelPath = "/home/irene/catkin_ws/src/beginner_tutorials/scripts/model/frozen_inference_graph.pb"  # file.pb
classesPath = "/home/irene/catkin_ws/src/beginner_tutorials/scripts/model/coco.names"  # file.names
bridge = cv_bridge.CvBridge()

def im_callback(rosmsg):
    global configPath, modelPath, classesPath, bridge
    img = bridge.imgmsg_to_cv2(rosmsg, "bgr8")
    net = cv2.dnn_DetectionModel(modelPath, configPath)
    net.setInputSize(320, 320)
    net.setInputScale(1/127.5)
    net.setInputMean((127.5, 127.5, 127.5))
    net.setInputSwapRB(True)
    with open(classesPath, "r") as file:
        classesList = file.read().splitlines()
    classesLabelIDs, confidences, body_rects = net.detect(img, confThreshold=0.6)
    body_rects = list(body_rects)
    confidences = list(np.array(confidences).reshape(1, -1)[0])
    confidences = list(map(float, confidences))
    bboxsIDx = cv2.dnn.NMSBoxes(body_rects, confidences, score_threshold=0.5, nms_threshold=0.2)
    if len(bboxsIDx) != 0:
        for _, bID in enumerate(bboxsIDx):
            classLabelID = np.squeeze(classesLabelIDs[np.squeeze(bID)])
            classLabel = classesList[classLabelID]
            if classLabel == "person":
                body_rect = body_rects[np.squeeze(bID)]
                classConfidence = confidences[np.squeeze(bID)]
                display_text = "{} - {:.1}".format(classLabel, classConfidence)
                x, y, w, h = body_rect
                cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 1)
                cv2.line(img, (x, y), (x + int(w*.3), y), (0, 0, 255), 3)
                cv2.line(img, (x, y), (x, y + int(h*.3)), (0, 0, 255), 3)
                cv2.line(img, (x+w, y), (x + w - int(w*.3), y), (0, 0, 255), 3)
                cv2.line(img, (x+w, y), (x+w, y + int(h*.3)), (0, 0, 255), 3)
                cv2.line(img, (x+w, y+h), (x + w - int(w*.3), y + h), (0, 0, 255), 3)
                cv2.line(img, (x+w, y+h), (x+w, y + h - int(h*.3)), (0, 0, 255), 3)
                cv2.line(img, (x, y+h), (x + int(w*.3), y+h), (0, 0, 255), 3)
                cv2.line(img, (x, y+h), (x, y + h - int(h*.3)), (0, 0, 255), 3)
                cv2.putText(img, display_text, (x, y-8), cv2.FONT_HERSHEY_COMPLEX, .4, (255, 255, 255), 1)
    cv2.imshow("Image", img)
    cv2.waitKey(1)

def main():
    global rosmsg, rostopic
    rospy.init_node("webcam_node", anonymous=True)
    rospy.Subscriber(rostopic, rosmsg, im_callback)
    rospy.spin()

if __name__ == "__main__":
    main()
I was trying to use ROS Melodic with Python 3 and got this error.
As you've probably gathered, the error is because you're trying to use Python 3. Melodic targets Python 2.7 exclusively, and trying to make it run with Python 3 is strongly discouraged, for exactly the reasons you're seeing. If you really want to use Python 3 packages and dependencies in your project, you should instead be running the Noetic distro of ROS.
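As a small illustration (this is about the interpreter mismatch, not a fix for the dnn error itself): the interpreter a rosrun'd node uses comes from its shebang line, so the same script targets different Pythons depending on the distro:
#!/usr/bin/env python    # what a Melodic-era node normally declares (resolves to Python 2.7 on Ubuntu 18.04)
#!/usr/bin/env python3   # the shebang used in the question; this is the supported setup on Noetic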
I'm trying to get real-time face recognition for a trained VGG16 model (it has 6 classes). When I tried, I got the above error. This is my code.
from PIL import Image
from tensorflow.keras.applications.vgg16 import preprocess_input
import base64
from io import BytesIO
import json
import random
import cv2
from keras.models import load_model
import numpy as np
from keras_preprocessing import image
model = load_model('FAceRec.h5', compile=False)
face_cas = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def face_extr(img):
    faces = face_cas.detectMultiScale(img, 1.3, 5)
    if faces is ():
        return None
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 255), 2)
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face

## web cam
video_cap = cv2.VideoCapture(0)
while True:
    _, frame = video_cap.read()
    face = face_extr(frame)
    if type(face) is np.ndarray:
        face = cv2.resize(face, (244, 244), 3)
        face = face.reshape(1, 224, 224, 3)
        img = Image.fromarray(face, 'RGB')
        img_array = np.array(img)
        img_array = np.expand_dims(img_array, axis=0)
        pred = model.predict(img_array)
        print(pred)
        name = "No Matching"
        if (pred[1][1] > 0.5):
            name = "Suhail"
            cv2.putText(frame, name, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    else:
        cv2.putText(frame, "No Matching Face", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow('Result', frame)
    k = cv2.waitKey(1)
    if k == ord('q'):
        break
video_cap.release()
cv2.destroyAllWindows()
Can someone please help me figure this out? Even when I change the reshape to (244, 244, 3) it shows the same error again and again. Can somebody explain why it's happening and how to solve it?
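For what it's worth, the shapes in the loop above are inconsistent: cv2.resize(face, (244, 244)) produces a 244 x 244 x 3 array, which cannot be reshaped to (1, 224, 224, 3) because the element counts differ (and reshaping to (244, 244, 3) still leaves the model expecting 224 x 224 input). A minimal preprocessing sketch that keeps the sizes consistent with VGG16's 224 x 224 input (an assumption about the trained model, not a confirmed fix) would be:
# Sketch: make the resize target match the model's expected 224x224x3 input
# and add the batch axis exactly once.
face = cv2.resize(face, (224, 224))                         # (224, 224, 3)
img_array = np.expand_dims(face.astype("float32"), axis=0)  # (1, 224, 224, 3)
img_array = preprocess_input(img_array)                     # VGG16 preprocessing, already imported above
pred = model.predict(img_array)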
Good evening. I have some problems correcting an image after removing the grid, in order to get the area under the curve. I have tried erode and dilate before and after removing the grid, but I end up with a badly broken curve. Maybe you can advise me of a more efficient way to correct the curve.
import sys
import cv2
import numpy as np
def remove_grid_lines(src):
    clean_lines_h = remove_lines(src, np.ones((1, 10), np.uint8), np.ones((1, 5), np.uint8))
    clean_lines_v = remove_lines(src, np.ones((10, 1), np.uint8), np.ones((5, 1), np.uint8))
    return cv2.bitwise_not(cv2.bitwise_not(src) - clean_lines_h - clean_lines_v)

def remove_lines(src, kernel1, kernel2):
    erosion = cv2.erode(src, kernel1, iterations=1)
    dilation = cv2.bitwise_not(cv2.dilate(erosion, kernel1, iterations=1))
    clean_lines = cv2.dilate(cv2.erode(dilation, kernel2, iterations=6), kernel2, iterations=6)
    return clean_lines

def main(argv):
    original_image = cv2.resize(cv2.imread(argv[0]), (640, 640))
    grayscale_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
    grayscale_image = cv2.GaussianBlur(grayscale_image, (3, 3), 0)
    (thresh, binary_image) = cv2.threshold(grayscale_image, 120, 255, cv2.THRESH_BINARY)
    binary_image = cv2.bitwise_not(binary_image)
    cv2.imshow("Original", original_image)
    cv2.imshow("Grayscale", grayscale_image)
    cv2.imshow("Binary", binary_image)
    binary_image = remove_grid_lines(binary_image)
    cv2.imshow("Clean", binary_image)
    cv2.waitKey()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main(sys.argv[1:])
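One common way to reconnect the small breaks that grid removal leaves in the curve is a morphological closing on the cleaned binary image; this is only a sketch (the kernel size and iteration count are guesses and would need tuning for the real plot):
# Sketch: after remove_grid_lines(), close small gaps in the curve with a
# dilate-then-erode (morphological closing) using a small elliptical kernel.
# This assumes the curve is the white foreground; invert first if it is not.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
repaired = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel, iterations=2)
cv2.imshow("Repaired", repaired)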
This project is face recognition with a barcode. It needs to detect the face first before it can scan the barcodes. The flow is fine, except that after it detects someone's face the imshow window stops responding and the webcam feed freezes. I want the webcam feed to keep updating while the rest of my code runs; how can I do that?
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise IOError("Cannot open webcam")
temp = ""
while True:
    success, eImgs = cap.read()
    if success:
        font = cv2.FONT_HERSHEY_PLAIN
        datet = str(datetime.now())
        frame = cv2.putText(eImgs, datet, (10, 50), font, 1, (0, 0, 128), 1, cv2.LINE_AA)
        # eImgs_v1 = cv2.resize(eImgs, (0, 0), None, 0.25, 0.25)
        eImgs_v1 = cv2.cvtColor(eImgs, cv2.COLOR_BGR2RGB)
        facesWebcam = face_recognition.face_locations(eImgs_v1)
        encodesWebcam = face_recognition.face_encodings(eImgs_v1, facesWebcam)
        for encodeKnown_v2, faceLoc in zip(encodesWebcam, facesWebcam):
            facesCompared = face_recognition.compare_faces(encodeKnown, encodeKnown_v2)
            faceDistance = face_recognition.face_distance(encodeKnown, encodeKnown_v2)
            faceIndex = np.argmin(faceDistance)
            if facesCompared[faceIndex]:
                employeeName = ListNames[faceIndex]
                y = employeeName
                if temp == "" or temp != employeeName:
                    print(employeeName)
                    temp = employeeName
                if y:
                    print(y)
                    print("AUTHORIZED")
                    time.sleep(1)
                    # Arduino and Python connection
                    arduino = serial.Serial('COM9', 115200, timeout=.1)
                    time.sleep(1)
                    print("The system is ready!")
                    while True:
                        barcode = arduino.readline()[:-2]
                        strbarcode = barcode.decode('utf-8')
                        if strbarcode:
                            x = strbarcode
                            print(x)
                            if y == x:
                                print('Have a nice day!')
                                time.sleep(3)
                                print("Next Employee please!")
                            else:
                                print('This is not yours!')
            else:
                print("UNAUTHORIZED")
            p1, p2, p3, p4 = faceLoc
            cv2.rectangle(eImgs, (p1, p2), (p3, p4), (0, 255, 0), 2)
            cv2.putText(eImgs, y, (p1, p3), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
        cv2.imshow('EMPLOYEE', eImgs)
        cv2.waitKey(1)
cap.release()
cv2.destroyAllWindows()
I recommend you use deepface. Its stream function applies face recognition with several state-of-the-art face recognition models.
models = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace', 'DeepID', 'Dlib', 'ArcFace']
#!pip install deepface
from deepface import DeepFace
DeepFace.stream(db_path = 'C:/my_db'
, model_name = models[0]
, enable_face_analysis = False #to disable age, gender, emotion prediction
)
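If you would rather keep the face_recognition pipeline from the question, note that the freeze comes mainly from the inner while True: barcode loop, which blocks forever on the serial port and never returns control to cv2.imshow/cv2.waitKey. One common pattern is to read the scanner on a background thread and hand results to the main loop through a queue; the sketch below is an illustration only (the read_barcodes helper and queue names are not from the original project):
# Sketch: keep the OpenCV display loop responsive by moving the blocking
# serial/barcode reads onto a daemon thread and communicating via a queue.
import threading
import queue
import serial

barcode_queue = queue.Queue()

def read_barcodes(port='COM9', baud=115200):
    # Hypothetical helper: runs forever on a daemon thread, pushing scans to the queue.
    arduino = serial.Serial(port, baud, timeout=.1)
    while True:
        barcode = arduino.readline()[:-2]
        if barcode:
            barcode_queue.put(barcode.decode('utf-8'))

threading.Thread(target=read_barcodes, daemon=True).start()

# Inside the main camera loop, replace the nested blocking loop with a
# non-blocking poll so every frame still reaches imshow/waitKey:
#     try:
#         scanned = barcode_queue.get_nowait()
#     except queue.Empty:
#         scanned = None
#     if scanned and scanned == y:
#         print('Have a nice day!')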
So this is what I have now:
As you can see, the neural style transfer is only applied over the area that the detection box covers. I am trying to paste the transformed picture (which will always be smaller than 1200 x 900, because the detection box lives inside a 1200 x 900 frame) onto a black 1200 x 900 image so that I can save the video file.
My box is measured with startX, endX, startY, and endY. The way I am currently trying to put the stylized picture over the background is black_background[startY:endY, startX:endX] = output, where output also has the size (endY - startY, endX - startX).
My way is not working; any insights? Also, for some reason, when I do black_background[startY:endY, startX:endX] = output, there is often a few-pixels-off broadcasting issue, like "can't add (859, 100, 3) with (860, 100, 3)". Is there a non-buggy solution to the black-background issue? Manually doing black_background[startY:endY, startX:endX] = output feels fragile.
Here's my full code; I marked the if block that actually matters with ----- lines. Thank you!
from __future__ import print_function
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
from imutils import paths
import itertools
# We need to input model prototxt
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
                help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
                help="minimum probability to filter weak detections")
ap.add_argument("-nm", "--neuralmodels", required=True,
                help="path to directory containing neural style transfer models")
args = vars(ap.parse_args())
# we should identify the class first, and then transfer that block
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# load our serialized model from disk
print("[INFO] loading model...")
DetectionNet = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# grab the paths to all neural style transfer models in our 'models'
# directory, provided all models end with the '.t7' file extension
modelPaths = paths.list_files(args["neuralmodels"], validExts=(".t7",))
modelPaths = sorted(list(modelPaths))
# generate unique IDs for each of the model paths, then combine the
# two lists together
models = list(zip(range(0, len(modelPaths)), (modelPaths)))
# use the cycle function of itertools that can loop over all model
# paths, and then when the end is reached, restart again
modelIter = itertools.cycle(models)
(modelID, modelPath) = next(modelIter)
NTSnet = cv2.dnn.readNetFromTorch(modelPath)
# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(src=1).start()
fps = FPS().start()
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output_video = cv2.VideoWriter('output.avi', fourcc, 20.0, (1200, 900))
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=1200, height=900)
    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    # pass the blob through the network and obtain the detections and
    # predictions
    DetectionNet.setInput(blob)
    detections = DetectionNet.forward()
    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the prediction
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the `confidence` is
        # greater than the minimum confidence
        if confidence > args["confidence"]:
            # extract the index of the class label from the
            # `detections`, then compute the (x, y)-coordinates of
            # the bounding box for the object
            idx = int(detections[0, 0, i, 1])
            if (CLASSES[idx] == "person" and confidence > .90):
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                # draw the prediction on the frame
                label = "{}: {:.2f}%".format("PERSON", confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                # print box area in background
                newimage = frame[startY:endY, startX:endX]
                (h, w) = newimage.shape[:2]
                #print(h,w)
                #print(startX, endX, startY, endY)
                noise_picture = cv2.imread('white_noise.jpg')
                black_background = cv2.imread('black.png')
                # -------------------------------------------------------------------
                if (h > 0 and w > 0):
                    # to_be_transformed is the detection box area
                    # resize that area for MobileNetSSD
                    #to_be_transformed = imutils.resize(to_be_transformed, height=450)
                    (height_orig, width_orig) = noise_picture.shape[:2]
                    noise_picture[startY:endY, startX:endX] = newimage
                    noise_picture = imutils.resize(noise_picture, height=450)
                    # run it through the network, output is the image
                    (h, w) = noise_picture.shape[:2]
                    # print(h, w)
                    blob2 = cv2.dnn.blobFromImage(noise_picture, 1.0, (w, h), (103.939, 116.779, 123.680), swapRB=False, crop=False)
                    NTSnet.setInput(blob2)
                    output = NTSnet.forward()
                    output = output.reshape((3, output.shape[2], output.shape[3]))
                    output[0] += 103.939
                    output[1] += 116.779
                    output[2] += 123.680
                    output /= 255.0
                    output = output.transpose(1, 2, 0)
                    # set the 600 x 450 back to the original size
                    black_background = imutils.resize(black_background, width=1200, height=900)
                    output = imutils.resize(output, width=1200)
                    #black_background[startY:endY, startX:endX] = output[startY:endY, startX:endX]
                    output = output[startY:endY, startX:endX]
                    (h2, w2) = output.shape[:2]
                    if (h2 > 0 and w2 > 0):
                        cv2.imshow('hmm', output)
                        black_background[startY:endY, startX:endX] = output
                        cv2.imshow("uh", black_background)
                        #output_video.write(black_background)
                        #output_video.write(background)
                # ---------------------------------------------------------------
    # show the output frame, which is the whole thing
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
    # update the FPS counter
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
Oh man, second time I've made this mistake. You have to multiply by 255 when you are adding your output picture to your background. The reason is that the style-transfer output was divided by 255.0, so it's a float image with values in [0, 1], while the background loaded with imread is uint8 with values in [0, 255]; cv2.imshow happens to display float images by treating [0, 1] as the full range, which is why it looks fine on screen, but copying it into the uint8 background (or writing it to video) needs the * 255 and a cast first. Don't take my word on every detail, though.
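For the few-pixels-off broadcasting error (e.g. (859, 100, 3) vs (860, 100, 3)), the usual cause is that imutils.resize computes the other dimension from the one you pass to preserve the aspect ratio, so the resized output's height doesn't land exactly back on the original rows and the slice and the patch end up a row apart. A defensive sketch (variable names follow the question; not the poster's exact code) is to scale, cast, and clamp the patch to the destination slice before assigning:
# Sketch: convert the [0, 1] float output to uint8 and clamp its shape to the
# destination slice so a one-pixel rounding difference cannot break broadcasting.
patch = (np.clip(output, 0, 1) * 255).astype("uint8")
dst_h = min(endY - startY, patch.shape[0])
dst_w = min(endX - startX, patch.shape[1])
black_background[startY:startY + dst_h, startX:startX + dst_w] = patch[:dst_h, :dst_w]
# assuming black_background matches the VideoWriter size (1200 x 900), it can then be saved:
output_video.write(black_background)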