In the following code, I retrieve the URL of a newly created blob, which I intend to process. Could anyone suggest a tutorial that steps through how to download the blob (which is a video), open it, and process each frame when this event is triggered?
You could refer to this article and its download_blob method to download the blob, and refer to here for processing each frame.
import json
import logging
import os

import cv2
import azure.functions as func
from azure.storage.blob import BlobServiceClient


def main(event: func.EventGridEvent):
    result = json.dumps({
        'id': event.id,
        'data': event.get_json(),
        'topic': event.topic,
        'subject': event.subject,
        'event_type': event.event_type,
    })
    logging.info('Python EventGrid trigger processed an event: %s', result)

    connect_string = "connect string of storage"
    DEST_DIR = "path to download the video"  # local directory the video is saved into

    blob_service_client = BlobServiceClient.from_connection_string(connect_string)
    blob_url = event.get_json().get('url')
    logging.info('blob URL: %s', blob_url)
    blob_name = blob_url.split("/")[-1].split("?")[0]
    container_name = blob_url.split("/")[-2].split("?")[0]

    # Download the blob to a local file
    video_path = os.path.join(DEST_DIR, blob_name)
    blob_client = blob_service_client.get_blob_client(container=container_name, blob=blob_name)
    with open(video_path, "wb") as my_blob:
        download_stream = blob_client.download_blob()
        my_blob.write(download_stream.readall())

    # Process the video frame by frame
    # (cv2.imshow is omitted: a Functions host is headless and has no display)
    logging.info('video path: %s', video_path)
    cap = cv2.VideoCapture(video_path)
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:  # stop once the video runs out of frames
            break
        cv2.imwrite("frame%d.jpg" % count, frame)
        count += 1
    cap.release()
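As a side note (my suggestion, not from the linked article): parsing the container and blob names with urllib.parse can be more robust than splitting on slashes, since it also handles blobs nested inside virtual directories:
from urllib.parse import urlparse

# The path of https://<account>.blob.core.windows.net/<container>/<dir>/<blob>
# is '/<container>/<dir>/<blob>'; split once to separate container from blob name.
path = urlparse(blob_url).path.lstrip('/')
container_name, blob_name = path.split('/', 1)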
I was trying to deploy an ML model using Node.js with the help of the child_process package. While running __predict(), it takes too long and ends with a code 1 error.
Here I share all the related code to help decode the issue:
Model Python code -->
import keras
import time
import numpy as np
from flask import Flask, request, jsonify, render_template
import tensorflow as tf
import pickle
import string
import re
from keras_preprocessing.sequence import pad_sequences

start = time.time()
encoder = keras.models.load_model('enc', compile=False)
decoder = keras.models.load_model('dec', compile=False)

def initialize_hidden_state():
    return tf.zeros((1, 1024))

eng_tokenizer, hin_tokenizer = pickle.load(open('tokenizer.pkl', 'rb'))

def clean(text):
    text = text.lower()
    special_char = set(string.punctuation + '।')  # set of all special characters, including the Devanagari danda '।'
    # Remove all the special characters
    text = ''.join(ch for ch in text if ch not in special_char)
    seq = eng_tokenizer.texts_to_sequences([text])
    seq = pad_sequences(seq, maxlen=23, padding='post')
    return seq

def __predict(data):
    # Get the data from the POST request.
    #data = request.get_json(force=True)
    clean_input = clean(data)
    # Make a prediction using the models loaded from disk.
    hidden_enc = initialize_hidden_state()
    enc_out, enc_hidden = encoder(clean_input, hidden_enc)
    result = ''
    dec_hidden = enc_hidden
    dec_input = tf.expand_dims(hin_tokenizer.texts_to_sequences(['<Start>'])[0], 0)
    for t in range(25):
        predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
        predicted_id = tf.argmax(predictions[0]).numpy()
        x = hin_tokenizer.sequences_to_texts([[predicted_id]])[0]
        if x == 'end':
            break
        result += x + ' '
        # the predicted ID is fed back into the model
        dec_input = tf.expand_dims([predicted_id], 0)
    CLEANR = re.compile(r"([A-Za-z])", re.DOTALL)
    result = re.sub(CLEANR, '', result)  # strip any Latin characters from the output
    return result

# import json
# with open('data.json', 'r') as openfile:
#     json_object = json.load(openfile).get('data')

data = __predict("file")
end = time.time()
# print(start - end)
data1 = data + "abcd"
print(data1)
# print("abcd")
# dictionary = {
#     "data": data,
# }
# json_object = json.dumps(dictionary, indent=2)
# with open("result.json", "w") as outfile:
#     outfile.write(json_object)
When I print("abcd") or print(start - end), the script gives a result and ends with code 0. But when I print the model output (data1), it gives no result and ends with code 1.
Here is the childProcess code -->
const { spawn } = require('child_process')

app.get('/', (req, res) => {
  let dataToSend
  let largeDataSet = []
  // spawn a new child process to call the python script
  const python = spawn('python', ['app.py'])
  // console.log(python);
  // collect data from the script
  python.stdout.on('data', function (data) {
    console.log('Pipe data from python script ...')
    //dataToSend = data;
    largeDataSet.push(data)
  })
  // in the close event we are sure that the stream from the child process is closed
  python.on('close', (code) => {
    console.log(`child process close all stdio with code ${code}`)
    // send data to the browser
    // largeDataSet = []
    console.log(largeDataSet.join(''))
    res.send(largeDataSet.join(''))
  })
})
Here is the error --->
child process close all stdio with code 1
Please help; I tried hard but failed even to understand the problem.
Thanks in advance!!!
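Since the Express route above only pipes stdout, one way to surface why the script exits with code 1 is to print the exception to stdout as well. A minimal sketch around the prediction call in app.py (my assumption about where it fails, not the asker's code):
import sys
import traceback

try:
    data = __predict("file")
    print(data + "abcd")
except Exception:
    # make the failure visible through the stdout pipe instead of only stderr
    traceback.print_exc(file=sys.stdout)
    sys.exit(1)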
I have an application like this, with one display that shows a real-time Basler camera feed. I have already figured out how to connect to the Basler camera and show video on it, but the video is not very smooth.
# Connect to a camera
for i in MainWindow.camera_db.all():
    info = None
    for x in pylon.TlFactory.GetInstance().EnumerateDevices():
        if x.GetSerialNumber() == i['id']:
            info = x
            break
    if info is not None:
        camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateDevice(info))
        camera.Open()
        if MainWindow.viewer1 is None:
            MainWindow.viewer1 = BaslerOpenCVViewer(camera)
            logging.warning(f'Camera 1 - serial number: {i["id"]} - OK')
    else:
        logging.warning('Camera with {} serial number not found'.format(i['id']))
and then I tried:
def update_frame(self):
    try:
        frame = MainWindow.viewer1.get_image()
        # frame = cv2.imread('test.jpg')
        self.load_display1(frame)  # take a frame and show it on MainWindow.display
        return frame
    except Exception as e:
        logging.warning(str(e))

self.time_get_image = QtCore.QTimer(self, interval=1)
self.time_get_image.timeout.connect(self.update_frame)  # call update_frame every 1 ms to get real-time video from the Basler camera, but it does not work well
self.time_get_image.start()
Is there another way to connect to the Basler camera in continuous mode and show the feed in the application?
Create a label and send the img to the displayImage function; you will get the image.
from pypylon import pylon
import cv2

camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
converter = pylon.ImageFormatConverter()
converter.OutputPixelFormat = pylon.PixelType_BGR8packed
converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

while camera.IsGrabbing():
    grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
    # if grabResult.GrabSucceeded():
    image = converter.Convert(grabResult)
    img = image.GetArray()
    self.displayImage(img)
    cv2.imshow("video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
cv2.waitKey()
def displayImage(self, img):
    qformat = QImage.Format_Indexed8
    if len(img.shape) == 3:
        if img.shape[2] == 4:
            qformat = QImage.Format_RGBA8888  # 4-channel images need an alpha-aware format
        else:
            qformat = QImage.Format_RGB888
    img = QImage(img, img.shape[1], img.shape[0], qformat)
    img = img.rgbSwapped()
    self.ui.Camera_lbl.setPixmap(QPixmap.fromImage(img))
    self.ui.Camera_lbl.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
You can use the following code:
from pypylon import pylon
import cv2

# connecting to the first available camera
camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())

# grabbing continuously (video) with minimal delay
camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)

converter = pylon.ImageFormatConverter()
# converting to OpenCV BGR format
converter.OutputPixelFormat = pylon.PixelType_BGR8packed
converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

while camera.IsGrabbing():
    grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
    if grabResult.GrabSucceeded():
        # Access the image data
        image = converter.Convert(grabResult)
        img = image.GetArray()
        cv2.namedWindow('title', cv2.WINDOW_NORMAL)
        cv2.imshow('title', img)
        k = cv2.waitKey(1)
        if k == 27:  # Esc stops the loop
            break
    grabResult.Release()

# Releasing the resource
camera.StopGrabbing()
cv2.destroyAllWindows()
The code is taken from this GitHub sample: pypylon/samples/opencv.py
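If the QTimer polling shown earlier is what makes the video choppy, one alternative (a sketch under my assumptions, not tested against the asker's app) is to run the grab loop in a QThread and push frames to the GUI through a signal, so the Qt event loop is never blocked:
import numpy as np
from PyQt5 import QtCore
from pypylon import pylon

class GrabThread(QtCore.QThread):
    frame_ready = QtCore.pyqtSignal(np.ndarray)  # carries each grabbed frame

    def __init__(self, camera, converter, parent=None):
        super().__init__(parent)
        self.camera = camera        # an opened pylon.InstantCamera
        self.converter = converter  # an ImageFormatConverter set up as above

    def run(self):
        self.camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        while self.camera.IsGrabbing():
            grabResult = self.camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
            if grabResult.GrabSucceeded():
                self.frame_ready.emit(self.converter.Convert(grabResult).GetArray())
            grabResult.Release()

# usage in the main window, assuming load_display1 accepts a NumPy array:
# self.grab_thread = GrabThread(camera, converter, self)
# self.grab_thread.frame_ready.connect(self.load_display1)
# self.grab_thread.start()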
My application switches on the camera on the client side, takes a frame, performs the ML processing on it in the backend, and throws the result back to the client.
This part of the code is throwing the error: 'PngImageFile' object has no attribute 'shape'.
The problem is in this line: frame = imutils.resize(pimg, width=700)
I guess some processing is not in the right format. Please guide.
@socketio.on('image')
def image(data_image):
    sbuf = io.StringIO()
    sbuf.write(data_image)
    # decode and convert into an image
    b = io.BytesIO(base64.b64decode(data_image))
    pimg = Image.open(b)
    # Process the image frame
    frame = imutils.resize(pimg, width=700)
    frame = cv2.flip(frame, 1)
    imgencode = cv2.imencode('.jpg', frame)[1]
    # base64 encode
    stringData = base64.b64encode(imgencode).decode('utf-8')
    b64_src = 'data:image/jpg;base64,'
    stringData = b64_src + stringData
    # emit the frame back
    emit('response_back', stringData)
The problem is that pimg is in PIL image format, while the imutils.resize function expects the image as a NumPy array. So, after the pimg = Image.open(b) line, you need to convert the PIL image to a NumPy array like below:
pimg = np.array(pimg)
For this you have to import the numpy library like below:
import numpy as np
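Putting it together, the relevant lines of the handler above become:
pimg = Image.open(b)
pimg = np.array(pimg)  # convert the PIL image to a NumPy array
frame = imutils.resize(pimg, width=700)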
Try this out; it helped me with a similar problem:
img_arr = np.array(img.convert("RGB"))
The problem was in the mode of the image. I had to convert it from 'P' to 'RGB':
print(img)
>> <PIL.PngImagePlugin.PngImageFile image mode=P size=500x281 at 0x7FE836909C10>
I'm completely new to OpenCV and Tesseract.
I spent all day trying to write code that would parse the game duration from images like this: original image (the game duration is in the top left corner).
I came up with code that manages to recognize the duration sometimes (in about 40% of all cases). Here it is:
try:
    from PIL import Image
except ImportError:
    import Image
import os
import cv2
import pytesseract
import re
import json

def non_digit_split(s):
    return filter(None, re.split(r'(\d+)', s))

def time_to_sec(minutes, seconds):
    return str(int(minutes) * 60 + int(seconds))

def process_img(image_url):
    img = cv2.resize(cv2.imread('./images/' + image_url), None, fx=5, fy=5, interpolation=cv2.INTER_CUBIC)
    text = pytesseract.image_to_string(img)
    if "WIN " in text:
        time = list(non_digit_split(text.split("WIN ", 1)[1][0:6].strip()))
        text = time_to_sec(time[0], time[2])
    else:
        text = 'Not recognized'
    return text

res = {}
img_list = os.listdir('./images')
print(img_list)
for i in img_list:
    res[i] = process_img(i)

with open('output.txt', 'w') as file:
    file.write(json.dumps(res))
Don't even ask how I arrived at resizing the image, but it helped a little.
I also tried cropping the image first, like this:
cropped image
but Tesseract couldn't find any text there.
I'm sure the issue I'm trying to solve is pretty easy. Can you please point me in the right direction? How should I preprocess the image so that Tesseract parses it correctly?
Thanks to DmitriiZ's comment, I managed to produce a working piece of code.
I made a preprocessor that outputs something like this:
Preprocessed image
Tesseract handles it just fine.
Here is the full code:
try:
    from PIL import Image
except ImportError:
    import Image
import os
import pytesseract
import json

def is_dark(image):
    pixels = image.getdata()
    black_thresh = 100
    nblack = 0
    for pixel in pixels:
        if (sum(pixel) / 3) < black_thresh:
            nblack += 1
    n = len(pixels)
    if (nblack / float(n)) > 0.5:
        return True
    else:
        return False

def preprocess(img):
    basewidth = 500
    wpercent = (basewidth / float(img.size[0]))
    hsize = int((float(img.size[1]) * float(wpercent)))
    # Enlarging the image
    img = img.resize((basewidth, hsize), Image.ANTIALIAS)
    # Converting the image to black and white
    img = img.convert("1", dither=Image.NONE)
    return img

def process_img(image_url):
    img = Image.open('./images/' + image_url)
    # The area we need to crop can be found in one of two different places,
    # depending on which team won. You can replace this block and the is_dark()
    # function by just img.crop().
    top_area = (287, 15, 332, 32)
    crop = img.crop(top_area)
    if is_dark(crop):
        bot_area = (287, 373, 332, 390)
        crop = img.crop(bot_area)
    img = preprocess(crop)
    text = pytesseract.image_to_string(img)
    return text

res = {}
img_list = os.listdir('./images')
print(img_list)
for i in img_list:
    res[i] = process_img(i)

with open('output.txt', 'w') as file:
    file.write(json.dumps(res))
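As a possible further tweak (my suggestion, not part of the original answer): Tesseract can be told to treat the crop as a single line of text via its page segmentation mode, which sometimes improves results on small crops like these:
text = pytesseract.image_to_string(img, config='--psm 7')  # --psm 7: assume a single text line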
I have face detection training code that gives me some issues, and I have no clue why.
I am using a Mac, and it seems something is missing. Can you please advise what I should do?
Thank you in advance.
OpenCV(3.4.1) Error: Assertion failed (!empty()) in detectMultiScale, file /tmp/opencv-20180426-73279-16a912g/opencv-3.4.1/modules/objdetect/src/cascadedetect.cpp, line 1698
Traceback (most recent call last):
File "/Users/Desktop/OpenCV-Python-Series-master/src/faces-train.py", line 36, in <module>
faces = face_cascade.detectMultiScale(image_array, scaleFactor=1.5, minNeighbors=5)
cv2.error: OpenCV(3.4.1) /tmp/opencv-20180426-73279-16a912g/opencv-3.4.1/modules/objdetect/src/cascadedetect.cpp:1698: error: (-215) !empty() in function detectMultiScale
[Finished in 0.421s]
And my code is below.
import cv2
import os
import numpy as np
from PIL import Image
import pickle

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, "images")

face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()

current_id = 0
label_ids = {}
y_labels = []
x_train = []

for root, dirs, files in os.walk(image_dir):
    for file in files:
        if file.endswith("png") or file.endswith("jpg"):
            path = os.path.join(root, file)
            label = os.path.basename(root).replace(" ", "-").lower()
            #print(label, path)
            if not label in label_ids:
                label_ids[label] = current_id
                current_id += 1
            id_ = label_ids[label]
            #print(label_ids)
            #y_labels.append(label)  # some number
            #x_train.append(path)  # verify this image, turn into a NUMPY array, GRAY
            pil_image = Image.open(path).convert("L")  # grayscale
            size = (550, 550)
            final_image = pil_image.resize(size, Image.ANTIALIAS)
            image_array = np.array(final_image, "uint8")
            #print(image_array)
            faces = face_cascade.detectMultiScale(image_array, scaleFactor=1.5, minNeighbors=5)
            for (x, y, w, h) in faces:
                roi = image_array[y:y+h, x:x+w]
                x_train.append(roi)
                y_labels.append(id_)

#print(y_labels)
#print(x_train)

with open("pickles/face-labels.pickle", 'wb') as f:
    pickle.dump(label_ids, f)

recognizer.train(x_train, np.array(y_labels))
recognizer.save("recognizers/face-trainner.yml")
The assertion that fails indicates that your cascade was not loaded correctly. You can verify this by calling face_cascade.empty() just after the constructor. Please make sure that the path you provided ('cascades/data/haarcascade_frontalface_alt2.xml') is correct: when it points to a non-existent file, the constructor throws no exception, so you can easily miss the problem without calling empty() explicitly.
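A minimal check along those lines (a sketch; using cv2.data.haarcascades, which the opencv-python wheels provide, is my assumption about where the bundled cascades live on your machine):
import os
import cv2

# cv2.data.haarcascades points at the cascade files shipped with opencv-python
cascade_path = os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_alt2.xml')
face_cascade = cv2.CascadeClassifier(cascade_path)
if face_cascade.empty():
    raise IOError('Failed to load cascade from ' + cascade_path)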