I am currently working on code that paints on facial landmarks in real time using OpenCV and the face_recognition module. I saw source code on the internet that paints over a still image using PIL and face_recognition, and I was wondering which module is the counterpart of PIL for manipulating video. I want to find the landmarks of the face showing on the webcam and paint on those landmarks (for example: eyebrows, lips, etc.).
This is my current code:
from PIL import Image, ImageDraw
import face_recognition
import numpy as np
import cv2

video_capture = cv2.VideoCapture(0)
process_this_frame = True
face_landmarks_list = []

while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    # Work on a quarter-size frame for speed
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # face_recognition expects RGB; cvtColor also yields a contiguous array
    rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)

    if process_this_frame:
        face_landmarks_list = face_recognition.face_landmarks(rgb_small_frame)
    process_this_frame = not process_this_frame  # only detect on every other frame

    pil_image = Image.fromarray(rgb_small_frame)
    d = ImageDraw.Draw(pil_image, 'RGBA')
    for face_landmarks in face_landmarks_list:
        # Make the eyebrows into a nightmare
        d.polygon(face_landmarks['left_eyebrow'], fill=(68, 54, 39, 128))
        d.polygon(face_landmarks['right_eyebrow'], fill=(68, 54, 39, 128))
        d.line(face_landmarks['left_eyebrow'], fill=(68, 54, 39, 150), width=5)
        d.line(face_landmarks['right_eyebrow'], fill=(68, 54, 39, 150), width=5)

        # Gloss the lips
        d.polygon(face_landmarks['top_lip'], fill=(150, 0, 0, 128))
        d.polygon(face_landmarks['bottom_lip'], fill=(150, 0, 0, 128))
        d.line(face_landmarks['top_lip'], fill=(150, 0, 0, 64), width=8)
        d.line(face_landmarks['bottom_lip'], fill=(150, 0, 0, 64), width=8)

        # Sparkle the eyes
        d.polygon(face_landmarks['left_eye'], fill=(255, 255, 255, 30))
        d.polygon(face_landmarks['right_eye'], fill=(255, 255, 255, 30))

        # Apply some eyeliner
        d.line(face_landmarks['left_eye'] + [face_landmarks['left_eye'][0]], fill=(0, 0, 0, 110), width=6)
        d.line(face_landmarks['right_eye'] + [face_landmarks['right_eye'][0]], fill=(0, 0, 0, 110), width=6)

    # Convert the painted PIL image back to a BGR array so the drawing
    # actually shows in the OpenCV window
    result = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    cv2.imshow('Video', result)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
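As for a PIL counterpart for video: OpenCV frames are already plain NumPy arrays, so one option is to skip PIL and draw directly with OpenCV. Below is a minimal sketch (not from the original source; paint_landmarks is a hypothetical helper) that imitates PIL's translucent RGBA fills by drawing opaque shapes on a copy of the frame and alpha-blending it back with cv2.addWeighted:

import cv2
import numpy as np

def paint_landmarks(frame, face_landmarks_list, alpha=0.5):
    # Draw opaque shapes on an overlay, then blend the overlay with the
    # original frame to fake translucency
    overlay = frame.copy()
    for lm in face_landmarks_list:
        for name in ('left_eyebrow', 'right_eyebrow'):
            cv2.fillPoly(overlay, [np.array(lm[name], np.int32)], (39, 54, 68))  # BGR
        for name in ('top_lip', 'bottom_lip'):
            cv2.fillPoly(overlay, [np.array(lm[name], np.int32)], (0, 0, 150))
        for name in ('left_eye', 'right_eye'):
            cv2.polylines(overlay, [np.array(lm[name], np.int32)], True, (0, 0, 0), 2)
    return cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)

Keep in mind that the landmarks come from the quarter-size frame, so either draw on small_frame or scale the points up by 4 before drawing on the full frame.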
I am looking for a method to detect blurry areas in an image; I want to be able to select the regions that are blurred, and motion blur is the most interesting case.
For example: I took a photo of a moving coin and want to detect the blurred areas on its left and right.
Another example:
I tried several methods, and the gradient search turned out to be the best. Here is the result:
But this method is completely unsuitable for a non-uniform background, and I can't find the blurred areas in the photo with the car.
The code I used:
import cv2
import numpy as np

def put_mask(image, mask):
    # Convolve a grayscale version of the image with the given kernel
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.filter2D(src=gray, ddepth=-1, kernel=mask)

width, height, x, y = 550, 400, 50, 100
img = cv2.imread("car.jpg")
image = img[y:y+height, x:x+width]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Sobel kernels: horizontal and vertical gradients
mask_1 = np.array([[1, 0, -1],
                   [2, 0, -2],
                   [1, 0, -1]])
mask_2 = np.array([[1, 2, 1],
                   [0, 0, 0],
                   [-1, -2, -1]])

masked_1 = cv2.filter2D(gray, ddepth=-1, kernel=mask_1)
masked_2 = cv2.filter2D(gray, ddepth=-1, kernel=mask_2)
masked = cv2.bitwise_or(masked_1, masked_2)

cv2.imshow("edges", image)
cv2.imshow("grad", masked)
cv2.waitKey(0)
cv2.destroyAllWindows()
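One background-independent alternative to a plain gradient (not from the post above; just a sketch of the common variance-of-Laplacian idea): compute the Laplacian and measure its variance over a sliding window, since blurred regions have little high-frequency response. The window size and the 0.1 threshold below are arbitrary choices:

import cv2
import numpy as np

def sharpness_map(gray, ksize=31):
    # Local variance of the Laplacian: E[L^2] - (E[L])^2 over a ksize x ksize window
    lap = cv2.Laplacian(gray.astype(np.float64), cv2.CV_64F)
    mean = cv2.blur(lap, (ksize, ksize))
    mean_sq = cv2.blur(lap * lap, (ksize, ksize))
    return mean_sq - mean * mean

gray = cv2.cvtColor(cv2.imread("car.jpg"), cv2.COLOR_BGR2GRAY)
var_map = sharpness_map(gray)
blurry = (var_map < 0.1 * var_map.max()).astype(np.uint8) * 255  # low variance -> candidate blur
cv2.imshow("blurry regions", blurry)
cv2.waitKey(0)

Note that this also flags genuinely flat regions as "blurry", so it helps to mask out low-contrast areas first.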
So, I am trying to measure the line (see the attached code and result). The problem is that when creating the box points to measure the line, it also creates box points around the whole window canvas, no matter what the image or thresholding value is. I also tried to limit it with the contourArea(c) check, but couldn't (I don't know why). Kindly improve the code, or explain where this problem comes from in layman's terms, as I am a newbie.
Thank you!
import cv2
import numpy as np
import imutils
import imutils.perspective as persp
import scipy.spatial.distance as dist

print("PRESSED MODE 1 CONTOUR")

img = cv2.imread('opencv_frame_3.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# thresh, thresh_img = cv2.threshold(gray, 90, 255, cv2.THRESH_BINARY)
# thresh_img = cv2.erode(thresh_img, None, iterations=5)
# thresh_img = cv2.dilate(thresh_img, None, iterations=1)
thresh_img = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 17, 5)

conts = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
conts = imutils.grab_contours(conts)

cont_img = np.zeros(img.shape)
cont_img = cv2.drawContours(cont_img, conts, -1, (0, 255, 0), 1)
cont_img2 = np.copy(img)
cont_img2 = cv2.drawContours(cont_img2, conts, -1, (0, 255, 0), 1)

def midPoint(ptA, ptB):
    # Midpoint (x, y) of two points
    return ((ptA[0] + ptB[0]) / 2, (ptA[1] + ptB[1]) / 2)

for c in conts:
    box = cv2.minAreaRect(c)
    box = cv2.boxPoints(box)
    box = np.array(box, dtype='int')
    area = cv2.contourArea(c)
    if area < 50000:
        continue
    print(area / 96)
    print("________")

    cv2.drawContours(cont_img, [c], -1, (0, 0, 255), 2)
    cv2.drawContours(cont_img, [box], -1, (255, 0, 0), 2)
    cv2.drawContours(cont_img2, [c], -1, (0, 0, 255), 2)
    cv2.drawContours(cont_img2, [box], -1, (255, 0, 0), 2)

    for (x, y) in box:
        cv2.circle(cont_img, (x, y), 2, (255, 255, 255), 20)
        cv2.circle(cont_img2, (x, y), 2, (255, 255, 255), 20)

    (tl, tr, br, bl) = box
    # midPoint returns (x, y); the variable names below follow the original code
    (trX, tlX) = midPoint(tr, tl)
    (brX, blX) = midPoint(br, bl)
    cv2.circle(cont_img, (int(trX), int(tlX)), 1, (0, 255, 255), 20)
    cv2.circle(cont_img, (int(brX), int(blX)), 1, (0, 255, 255), 20)
    cv2.line(cont_img, (int(trX), int(tlX)), (int(brX), int(blX)), (255, 0, 255), 2)
    cv2.circle(cont_img2, (int(trX), int(tlX)), 1, (0, 255, 255), 20)
    cv2.circle(cont_img2, (int(brX), int(blX)), 1, (0, 255, 255), 20)
    cv2.line(cont_img2, (int(trX), int(tlX)), (int(brX), int(blX)), (255, 0, 255), 2)

    dA = dist.euclidean((int(trX), int(tlX)), (int(brX), int(blX)))
    cv2.putText(cont_img, "{:.2f} px".format(dA / 300), (int(trX + 20), int(blX - 20)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
    cv2.putText(cont_img2, "{:.2f} px".format(dA / 300), (int(trX + 20), int(blX - 20)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)

    (tlX, blX) = midPoint(tl, bl)
    (trX, brX) = midPoint(tr, br)
    cv2.circle(cont_img, (int(tlX), int(blX)), 1, (0, 255, 0), 20)
    cv2.circle(cont_img, (int(trX), int(brX)), 1, (0, 255, 0), 20)
    cv2.line(cont_img, (int(tlX), int(blX)), (int(trX), int(brX)), (255, 0, 255), 2)
    cv2.circle(cont_img2, (int(tlX), int(blX)), 1, (0, 255, 0), 20)
    cv2.circle(cont_img2, (int(trX), int(brX)), 1, (0, 255, 0), 20)
    cv2.line(cont_img2, (int(tlX), int(blX)), (int(trX), int(brX)), (255, 0, 255), 2)

    dB = dist.euclidean((int(tlX), int(blX)), (int(trX), int(brX)))
    cv2.putText(cont_img, "{:.2f} in".format(dB / 300), (int(trX - 120), int(blX - 320)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
    cv2.putText(cont_img2, "{:.2f} in".format(dB / 300), (int(trX - 120), int(blX - 320)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)

    print("Line A", "{:.2f} in".format(dA / 96))
    print("Line B", "{:.2f} in".format(dB / 96))
    print("**************")

cv2.imshow('area', cont_img)
cv2.imshow("thresh", thresh_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Check the output image: the whole window size is getting measured.
I tried to limit the measurement, the contourArea(c) threshold, and c itself, but it doesn't work for me.
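The box around the whole window comes from adaptive thresholding turning the image border into one huge contour, which RETR_TREE duly returns; the area check does not help because that contour is also larger than 50000. A sketch of a filter (using the variable names from the code above) that skips any contour hugging the border or covering most of the frame:

h, w = thresh_img.shape[:2]
for c in conts:
    area = cv2.contourArea(c)
    x, y, bw, bh = cv2.boundingRect(c)
    touches_border = x <= 1 or y <= 1 or x + bw >= w - 1 or y + bh >= h - 1
    if area < 50000 or area > 0.9 * h * w or touches_border:
        continue
    # ... measure this contour as before ...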
I have done image segmentation of an image using PyTorch. I am trying to get the pixel count of the boat class to measure its area. As an example, in the image I want the pixel count of the boat. How do I do that, and is it possible to measure the area of the boat from the pixel count?
I am confused and trying to find a way; I would appreciate it if anybody could guide me.
The code is as below:
from torchvision import models
import torchvision.transforms as T
from PIL import Image
import matplotlib.pyplot as plt
import torch
import numpy as np

fcn = models.segmentation.fcn_resnet101(pretrained=True).eval()

img = Image.open('boat.jpg')
plt.imshow(img)
plt.show()

# Apply the transformations needed:
# resize to 256, center-crop to 224, convert to tensor, normalize
trf = T.Compose([T.Resize(256),
                 T.CenterCrop(224),
                 T.ToTensor(),
                 T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
inp = trf(img).unsqueeze(0)
out = fcn(inp)['out']
print(out.shape)

# Collapse the 21-channel output into a 2D map where each pixel holds a class index
om = torch.argmax(out.squeeze(), dim=0).detach().cpu().numpy()
print(om.shape)
print(np.unique(om))

# Helper function: map each class index to a color
def decode_segmap(image, nc=21):
    label_colors = np.array([(0, 0, 0),  # 0=background
                             # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
                             (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
                             # 6=bus, 7=car, 8=cat, 9=chair, 10=cow
                             (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),
                             # 11=dining table, 12=dog, 13=horse, 14=motorbike, 15=person
                             (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
                             # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
                             (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128)])
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)
    for l in range(0, nc):
        idx = image == l
        r[idx] = label_colors[l, 0]
        g[idx] = label_colors[l, 1]
        b[idx] = label_colors[l, 2]
    return np.stack([r, g, b], axis=2)

rgb = decode_segmap(om)
plt.imshow(rgb); plt.show()
I am looking for some guidance.
You are looking for skimage.measure.regionprops. Once you have the predicted label map (om in your code), you can apply regionprops to it and get the area of each region.
According to your code snippet, the output om is a map of category indices (0 = background, 1 = aeroplane, 2 = bicycle, ...).
To get the total area of a specific category, you just need to compare the output map with the corresponding index, then sum up the result.
For example, for the boat category with index 4:
BOAT_INDEX = 4
# om is a NumPy array at this point, so sum the boolean mask directly
area = (om == BOAT_INDEX).sum()
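If you want per-object pixel counts instead of one total for the whole class, a minimal regionprops sketch (assuming om from the question, with boat index 4) would be:

import numpy as np
from skimage import measure

labels = measure.label(om == 4)            # connected components of the boat class
for region in measure.regionprops(labels):
    print(region.area)                     # pixel count of each separate boat blob

Either way, a pixel count is an area in image pixels only; converting it to a real-world area requires a known scale (physical size per pixel), for example from the camera geometry or a reference object. Also note that om here is 224x224 after the Resize/CenterCrop transforms, not the original image resolution.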
How do I display the application in windows?
Code for reference:
import numpy as np
import cv2
import imutils
from keras.preprocessing.image import img_to_array
from keras.models import load_model

# Parameters for loading the detector and model
detection_model_path = 'ER_Project//haar-cascade-files-master/haarcascade_frontalface_default.xml'
emotion_model_path = 'ER_Project/_mini_XCEPTION.102-0.66.hdf5'

# Loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "surprised",
            "neutral"]

# Starting video streaming
cv2.namedWindow('your_face')
camera = cv2.VideoCapture(0)

while True:
    print("Hello")
    # Read and resize the frame
    frame = camera.read()[1]
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detection.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)

    canvas = np.zeros((250, 300, 3), dtype="uint8")
    frameClone = frame.copy()

    if len(faces) > 0:
        # Keep the largest detection; boxes are (x, y, w, h), so the area is w * h
        faces = sorted(faces, reverse=True, key=lambda b: b[2] * b[3])[0]
        (fX, fY, fW, fH) = faces
        # Extract the face ROI from the grayscale image, resize it to the
        # 64x64 input the network expects, and prepare it for classification
        roi = gray[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (64, 64))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        preds = emotion_classifier.predict(roi)[0]
        emotion_probability = np.max(preds)
        label = EMOTIONS[preds.argmax()]
    else:
        continue

    for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
        # Construct the label text
        text = "{}: {:.2f}%".format(emotion, prob * 100)
        # Draw the label + probability bar on the canvas
        w = int(prob * 300)
        cv2.rectangle(canvas, (7, (i * 35) + 5),
                      (w, (i * 35) + 35), (0, 0, 255), -1)
        cv2.putText(canvas, text, (10, (i * 35) + 23),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                    (255, 255, 255), 2)

    # Label the detected face on the frame
    cv2.putText(frameClone, label, (fX, fY - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
    cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
                  (0, 0, 255), 2)

    cv2.imshow('your_face', frameClone)
    cv2.imshow("Probabilities", canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()
NUMA support log output:
2022-04-20 04:36:21.181568: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:922] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2022-04-20 04:36:21.181664: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 3951 MB memory: -> device: 0, name: NVIDIA GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5
I need to run this OpenCV GUI app on Windows.
I have a phone and its HSV histogram, shown below, and I want to track the phone's movement. Based on its histogram, I set the image range like this:
greenLower = (300, 0, 50)
greenUpper = (50, 128, 250)
cv2.inRange(hsv, greenLower, greenUpper)
But nothing gets detected when waving the phone, and I am pretty sure it is because the color range is wrong. Could you tell me how to get the color range right? In particular, when the hue values lie between [300~50], should I set the range to (50~300) or (300~50), given that hue is a circle?
Phone
HSV histogram:
You have set the upper and lower bounds the wrong way around; they must be:
greenLower = (50, 0, 50)      # previously (300, 0, 50)
greenUpper = (300, 128, 250)  # previously (50, 128, 250)
Also make sure that hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV), as OpenCV follows the BGR convention.
EDIT:
To segment colors in the two ranges 0~50 and 300~359, you can run cv2.inRange() twice, once per range:
greenLower1 = (0, 0, 20)
greenUpper1 = (50, 128, 100)
greenLower2 = (300, 0, 20)
greenUpper2 = (359, 128, 100)
mask1 = cv2.inRange(img_hsv, greenLower1, greenUpper1)
mask2 = cv2.inRange(img_hsv, greenLower2, greenUpper2)
mask = cv2.max(mask1, mask2)
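One caveat (an editorial note, not from the answer above): for 8-bit images OpenCV stores hue halved, in 0..179, so bounds written in degrees such as 300~359 will never match anything. A sketch with the hue bounds from the EDIT rescaled to OpenCV's range (the S/V values are kept as in the answer):

import cv2

img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # uint8 HSV: H in 0..179
mask1 = cv2.inRange(img_hsv, (0, 0, 20), (25, 128, 100))     # 0~50 deg  -> 0..25
mask2 = cv2.inRange(img_hsv, (150, 0, 20), (179, 128, 100))  # 300~359 deg -> 150..179
mask = cv2.max(mask1, mask2)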