ImportError: cannot import name 'NavigateThroughPoses' from 'nav2_msgs.action' - robotics

I am trying to follow https://github.com/SteveMacenski/nav2_rosdevday_2021/tree/main/nav2_rosdevday_2021
When I run robot_navigator.py, I get:
ImportError: cannot import name 'NavigateThroughPoses' from 'nav2_msgs.action' (/opt/ros/foxy/lib/python3.8/site-packages/nav2_msgs/action/__init__.py)
The problem is in
from nav2_msgs.action import NavigateThroughPoses, NavigateToPose, FollowWaypoints, ComputePathToPose, ComputePathThroughPoses
It cannot find NavigateThroughPoses and ComputePathThroughPoses (while it finds the rest).
When I look at the __init__.py it points to, I see:
from nav2_msgs.action._back_up import BackUp # noqa: F401
from nav2_msgs.action._compute_path_to_pose import ComputePathToPose # noqa: F401
from nav2_msgs.action._dummy_recovery import DummyRecovery # noqa: F401
from nav2_msgs.action._follow_path import FollowPath # noqa: F401
from nav2_msgs.action._follow_waypoints import FollowWaypoints # noqa: F401
from nav2_msgs.action._navigate_to_pose import NavigateToPose # noqa: F401
from nav2_msgs.action._spin import Spin # noqa: F401
from nav2_msgs.action._wait import Wait # noqa: F401
So NavigateThroughPoses and ComputePathThroughPoses are, indeed, not there.
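As a quick check (a diagnostic sketch of my own, not from the package docs), listing what the installed nav2_msgs actually exposes from the same Python environment confirms the same thing:
# List the Nav2 action interfaces provided by the installed nav2_msgs package
import nav2_msgs.action as nav2_actions
print([name for name in dir(nav2_actions) if not name.startswith('_')])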
Any idea what is wrong and how to fix it?
Thank you.

Related

Import Error: Deploying custom Pytorch Model on streamlit

I am trying to deploy a custom PyTorch-based web app on Streamlit. Everything works locally, but when deployed I find the following error in the logs:
Downloading: "https://github.com/ultralytics/yolov5/archive/master.zip" to /home/appuser/.cache/torch/hub/master.zip
2022-08-14 12:23:17.584 Uncaught app exception
Traceback (most recent call last):
File "/home/appuser/venv/lib/python3.9/site-packages/streamlit/scriptrunner/script_runner.py", line 557, in _run_script
exec(code, module.__dict__)
File "app.py", line 28, in <module>
model = torch.hub.load('ultralytics/yolov5', 'custom', path=run_model_path)
File "/home/appuser/venv/lib/python3.9/site-packages/torch/hub.py", line 339, in load
model = _load_local(repo_or_dir, model, *args, **kwargs)
File "/home/appuser/venv/lib/python3.9/site-packages/torch/hub.py", line 368, in _load_local
model = entry(*args, **kwargs)
File "/home/appuser/.cache/torch/hub/ultralytics_yolov5_master/hubconf.py", line 74, in custom
return _create(path, autoshape=autoshape, verbose=_verbose, device=device)
File "/home/appuser/.cache/torch/hub/ultralytics_yolov5_master/hubconf.py", line 31, in _create
from models.common import AutoShape, DetectMultiBackend
File "/home/appuser/.cache/torch/hub/ultralytics_yolov5_master/models/common.py", line 14, in <module>
import cv2
File "/home/appuser/venv/lib/python3.9/site-packages/cv2/__init__.py", line 5, in <module>
from .cv2 import *
ImportError: libGL.so.1: cannot open shared object file: No such file or directory
Here's what the requirements.txt contains:
pillow<=9.2.0
numpy<=1.21.0
streamlit==1.11.0
torch<=1.8.2+cu111
opencv<=4.5.1
Would love your assistance.
If you're deploying the app, opencv-python-headless is the appropriate package rather than opencv: the headless build does not depend on GUI libraries such as libGL.so.1, which are missing on the deployment host.
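A minimal sketch of the adjusted requirements.txt under that suggestion (the headless version pin is an assumption, not taken from the question):
pillow<=9.2.0
numpy<=1.21.0
streamlit==1.11.0
torch<=1.8.2+cu111
opencv-python-headless<=4.5.5.64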

Cannot use sqlite with the LocalExecutor [Airflow]

I am trying to restart the airflow scheduler using the following command
airflow scheduler
I am using Docker. I opened a shell inside my Airflow container and ran the command there.
It throws an exception
Traceback (most recent call last):
File "/usr/local/bin/airflow", line 25, in <module>
from airflow.configuration import conf
File "/usr/local/lib/python3.6/site-packages/airflow/__init__.py", line 31, in <module>
from airflow.utils.log.logging_mixin import LoggingMixin
File "/usr/local/lib/python3.6/site-packages/airflow/utils/__init__.py", line 24, in <module>
from .decorators import apply_defaults as _apply_defaults
File "/usr/local/lib/python3.6/site-packages/airflow/utils/decorators.py", line 36, in <module>
from airflow import settings
File "/usr/local/lib/python3.6/site-packages/airflow/settings.py", line 37, in <module>
from airflow.configuration import conf, AIRFLOW_HOME, WEBSERVER_CONFIG # NOQA F401
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 731, in <module>
conf.read(AIRFLOW_CONFIG)
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 421, in read
self._validate()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 213, in _validate
self._validate_config_dependencies()
File "/usr/local/lib/python3.6/site-packages/airflow/configuration.py", line 247, in _validate_config_dependencies
self.get('core', 'executor')))
airflow.exceptions.AirflowConfigException: error: cannot use sqlite with the LocalExecutor
I am looking for any way to restart the airflow scheduler.
This is expected.
Since SQLite doesn't support multiple connections, it can only be used with the SequentialExecutor; this is also explained in the docs.
If you want to use the LocalExecutor, set MySQL or PostgreSQL as the metadata database backend.
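For example, in a Dockerized Airflow 1.x-era setup this can be done with environment variables; a sketch with a placeholder Postgres connection string (host, user and password below are assumptions, not from the question):
AIRFLOW__CORE__EXECUTOR=LocalExecutor
AIRFLOW__CORE__SQL_ALCHEMY_CONN=postgresql+psycopg2://airflow:airflow@postgres:5432/airflow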

VnPy - ImportError: No module named vnctpmd

I just cloned VnPy, and I am trying to run VnTrader on an Ubuntu 16.04 machine, as described in the VnPy starter guide. I followed it step by step, but when I run
python vnpy/examples/VnTrader/run.py
I get the following ImportError. What is the problem?
Traceback (most recent call last):
File "run.py", line 28, in <module>
from vnpy.trader.gateway import (ctpGateway, ibGateway)
File "/home/alessandro/anaconda2/lib/python2.7/site-packages/vnpy-1.9.0-py2.7.egg/vnpy/trader/gateway/ctpGateway/__init__.py", line 5, in <module>
from .ctpGateway import CtpGateway
File "/home/alessandro/anaconda2/lib/python2.7/site-packages/vnpy-1.9.0-py2.7.egg/vnpy/trader/gateway/ctpGateway/ctpGateway.py", line 16, in <module>
from vnpy.api.ctp import MdApi, TdApi, defineDict
File "/home/alessandro/anaconda2/lib/python2.7/site-packages/vnpy-1.9.0-py2.7.egg/vnpy/api/ctp/__init__.py", line 4, in <module>
from .vnctpmd import MdApi
ImportError: No module named vnctpmd
The vnctpmd module is the API interface for the CTP broker in the VnPy package. As with every other API interface, you need to build it before you can import it.
In your case, you probably didn't build the CTP interface when prompted during the installation of VnPy (Do you need 'CTP' interface? No), so run.py now can't import the module.
Solution A: I don't need the CTP interface
If you don't need the CTP interface, open run.py and comment out the parts related to CTP (and to any other interfaces you didn't build):
# /examples/VnTrader/run.py script
# this version runs with only the IB interface built
# encoding: UTF-8

# Reload the sys module and set the default string encoding to utf8
try:
    reload  # Python 2
except NameError:  # Python 3
    from importlib import reload
import sys
reload(sys)
try:
    sys.setdefaultencoding('utf8')
except AttributeError:
    pass

# Detect the operating system
import platform
system = platform.system()

# vn.trader modules
from vnpy.event import EventEngine
from vnpy.trader.vtEngine import MainEngine
from vnpy.trader.uiQt import createQApp
from vnpy.trader.uiMainWindow import MainWindow

# Load the low-level gateway interfaces
from vnpy.trader.gateway import ibGateway
# ### here comment out the interfaces you don't need
# from vnpy.trader.gateway import (ctpGateway, ibGateway)
if system == 'Linux':
    # from vnpy.trader.gateway import xtpGateway
    pass
elif system == 'Windows':
    from vnpy.trader.gateway import (femasGateway, xspeedGateway,
                                     secGateway)

# Load the upper-level applications
from vnpy.trader.app import (riskManager, ctaStrategy,
                             spreadTrading, algoTrading)


#----------------------------------------------------------------------
def main():
    """Main program entry point."""
    # Create the Qt application object
    qApp = createQApp()

    # Create the event engine
    ee = EventEngine()

    # Create the main engine
    me = MainEngine(ee)

    # Add trading gateways
    # me.addGateway(ctpGateway)
    me.addGateway(ibGateway)
    if system == 'Windows':
        me.addGateway(femasGateway)
        me.addGateway(xspeedGateway)
        me.addGateway(secGateway)
    if system == 'Linux':
        # me.addGateway(xtpGateway)
        pass

    # Add the upper-level applications
    me.addApp(riskManager)
    me.addApp(ctaStrategy)
    me.addApp(spreadTrading)
    me.addApp(algoTrading)

    # Create the main window
    mw = MainWindow(me, ee)
    mw.showMaximized()

    # Start the Qt event loop in the main thread
    sys.exit(qApp.exec_())


if __name__ == '__main__':
    main()
Solution B: I need the CTP interface
If you do need CTP, reinstall VnPy with
bash install.sh
and answer Yes when prompted 'Do you need CTP'.

ImportError: no module named 'umqtt.MQTTClient' but file with class exists

I installed MicroPython v1.9.3-8 on my ESP8266 board. Here is the beginning of my main.py file:
from machine import Pin
led = Pin(2, Pin.OUT, value=1)
#---MQTT Sending---
from time import sleep_ms
from ubinascii import hexlify
from machine import unique_id
#import socket
from umqtt import MQTTClient
SERVER = "10.6.6.192"
CLIENT_ID = hexlify(unique_id())
TOPIC1 = b"/server/tem"
TOPIC2 = b"/server/hum"
TOPIC3 = b"/server/led"
The line from umqtt import MQTTClient throws an error when I reset the module:
File "main.py", line 11, in < module >
ImportError: no module named 'umqtt.MQTTClient'
Here is my umqtt.py file.
I uploaded the umqtt.py file to my ESP8266 with WebREPL. When I run:
import os
os.listdir()
I get this output:
>>> os.listdir()
['boot.py', 'webrepl_cfg.py', 'umqtt.py', 'main.py']
Since the class MQTTClient is defined on line 8 of the umqtt.py file, I do not know what I am doing wrong here.
I think you need to specify either the simple or the robust version:
from umqtt.simple import MQTTClient
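A minimal publish sketch along those lines, assuming umqtt.simple is available on the board (it is frozen into many ESP8266 MicroPython builds, or installable as micropython-umqtt.simple) and reusing SERVER, CLIENT_ID and TOPIC1 from the question:
# Publish a single value over MQTT using umqtt.simple
from umqtt.simple import MQTTClient

client = MQTTClient(CLIENT_ID, SERVER)
client.connect()
client.publish(TOPIC1, b"23.5")  # placeholder payload
client.disconnect()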

Google Cloud Dataflow cryptic message when downloading file from gcp to local system

I am writing a Dataflow pipeline that processes videos from a Google Cloud Storage bucket. The pipeline downloads each work item to the local filesystem and then re-uploads the results to the GCS bucket, following up on a previous question.
The pipeline works with the local DirectRunner; I'm having trouble debugging it on the DataflowRunner.
The error reads
File "run_clouddataflow.py", line 41, in process
File "/usr/local/lib/python2.7/dist-packages/google/cloud/storage/blob.py", line 464, in download_to_file self._do_download(transport, file_obj, download_url, headers)
File "/usr/local/lib/python2.7/dist-packages/google/cloud/storage/blob.py", line 418, in _do_download download.consume(transport) File "/usr/local/lib/python2.7/dist-packages/google/resumable_media/requests/download.py", line 101, in consume self._write_to_stream(result)
File "/usr/local/lib/python2.7/dist-packages/google/resumable_media/requests/download.py", line 62, in _write_to_stream with response: AttributeError: __exit__ [while running 'Run DeepMeerkat']
When trying to execute blob.download_to_file(file_obj) within:
storage_client = storage.Client()
bucket = storage_client.get_bucket(parsed.hostname)
blob = storage.Blob(parsed.path[1:], bucket)
# store local path
local_path = "/tmp/" + parsed.path.split("/")[-1]
print('local path: ' + local_path)
with open(local_path, 'wb') as file_obj:
    blob.download_to_file(file_obj)
print("Downloaded" + local_path)
I'm guessing that the workers don't have permission to write locally? Or perhaps there is no /tmp folder in the Dataflow container. Where should I write objects? It's hard to debug without access to the environment. Is it possible to access stdout from the workers for debugging purposes (a serial console?)
EDIT #1
I've tried explicitly passing credentials:
try:
    credentials, project = google.auth.default()
except:
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = known_args.authtoken
    credentials, project = google.auth.default()
as well as writing to the current working directory instead of /tmp/:
local_path = parsed.path.split("/")[-1]
print('local path: ' + local_path)
with open(local_path, 'wb') as file_obj:
    blob.download_to_file(file_obj)
I am still getting the cryptic error on blob downloads from GCS.
The full pipeline script is below; setup.py is here.
import argparse
import csv
import json
import logging
import os

import apache_beam as beam
from urlparse import urlparse
from google.cloud import storage


## The namespaces inside Cloud Dataflow workers are not inherited,
## see https://cloud.google.com/dataflow/faq#how-do-i-handle-nameerrors --
## better to write ugly import statements than to miss a namespace.
class PredictDoFn(beam.DoFn):

    def process(self, element):
        import csv
        from google.cloud import storage
        from DeepMeerkat import DeepMeerkat
        from urlparse import urlparse
        import os
        import google.auth

        DM = DeepMeerkat.DeepMeerkat()
        print(os.getcwd())
        print(element)

        # try adding credentials?
        # set credentials, inherited from the worker
        credentials, project = google.auth.default()

        # download element locally
        parsed = urlparse(element[0])

        # parse gcp path
        storage_client = storage.Client(credentials=credentials)
        bucket = storage_client.get_bucket(parsed.hostname)
        blob = storage.Blob(parsed.path[1:], bucket)

        # store local path
        local_path = parsed.path.split("/")[-1]
        print('local path: ' + local_path)
        with open(local_path, 'wb') as file_obj:
            blob.download_to_file(file_obj)
        print("Downloaded" + local_path)

        # Assign input from DataFlow/manifest
        DM.process_args(video=local_path)
        DM.args.output = "Frames"

        # Run DeepMeerkat
        DM.run()

        # upload back to GCS
        found_frames = []
        for (root, dirs, files) in os.walk("Frames/"):
            for filename in files:
                if filename.upper().endswith(".JPG"):
                    found_frames.append(os.path.join(root, filename))

        for frame in found_frames:
            # create GCS path
            path = "DeepMeerkat/" + parsed.path.split("/")[-1] + "/" + frame.split("/")[-1]
            blob = storage.Blob(path, bucket)
            blob.upload_from_filename(frame)


def run():
    import argparse
    import os
    import apache_beam as beam
    import csv
    import logging
    import google.auth

    parser = argparse.ArgumentParser()
    parser.add_argument('--input', dest='input',
                        default="gs://api-project-773889352370-testing/DataFlow/manifest.csv",
                        help='Input file to process.')
    parser.add_argument('--authtoken',
                        default="/Users/Ben/Dropbox/Google/MeerkatReader-9fbf10d1e30c.json",
                        help='Input file to process.')
    known_args, pipeline_args = parser.parse_known_args()

    # set credentials, inherited from the worker
    try:
        credentials, project = google.auth.default()
    except:
        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = known_args.authtoken
        credentials, project = google.auth.default()

    p = beam.Pipeline(argv=pipeline_args)

    vids = (p | 'Read input' >> beam.io.ReadFromText(known_args.input)
              | 'Parse input' >> beam.Map(lambda line: csv.reader([line]).next())
              | 'Run DeepMeerkat' >> beam.ParDo(PredictDoFn()))

    logging.getLogger().setLevel(logging.INFO)
    p.run()


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
I spoke to the google-cloud-storage package maintainer; this was a known issue. Updating the pinned versions in my setup.py to
REQUIRED_PACKAGES = ["google-cloud-storage==1.3.2","google-auth","requests>=2.18.0"]
fixed the issue.
https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3836
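For context, a minimal setup.py sketch showing where REQUIRED_PACKAGES plugs in (the package name and version below are illustrative, not from the original setup.py):
# Minimal setup.py for staging worker dependencies with a Dataflow pipeline
import setuptools

REQUIRED_PACKAGES = [
    "google-cloud-storage==1.3.2",
    "google-auth",
    "requests>=2.18.0",
]

setuptools.setup(
    name="deepmeerkat-dataflow",  # hypothetical package name
    version="0.0.1",              # hypothetical version
    install_requires=REQUIRED_PACKAGES,
    packages=setuptools.find_packages(),
)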
