publishing ROS topic from the execution callback of ROS Action - ros

I am building a state machine node(ROS2 Action_client) that interact with Planner node(ROS2 Action_server). In the execution callback of the planner node I need to publish a topic (from the same node). Is it possible to publish a topic while action is running?
class PlanningProblemServer(Node):
def __init__(self): super().__init__('planning_problem_server')
self._action_server = ActionServer(self, PlanAid, 'planning_problem_sm',
execute_callback=self.execute_callback,
goal_callback=self.goal_callback,
cancel_callback=self.cancel_callback)
self._publisher = self.create_publisher(Bool, 'Planning_problem_data')
def goal_callback(self, goal_request): #Some code
def cancel_callback(self, goal_handle): #Some code
def execute_callback(self, goal_handle):
feedback_msg.feedback = "loading planning problem...!"
goal_handle.publish_feedback(feedback_msg)
msg = String()
msg.data = "abc"
self._publisher.publish(msg)
goal_handle.set_succeeded()
return result
def main(args=None):
rclpy.init(args=args)
minimal_action_server = PlanningProblemServer()
rclpy.spin(minimal_action_server)
minimal_action_server.destroy()
rclpy.shutdown()
if __name__ == '__main__': main()
with above code I am getting following error,
Traceback (most recent call last):
File "/home/developer/ros2_ws/ros2/src/planning_problem/planning_prob_pkg/planning_prob_node.py", line 102, in <module>
if __name__ == '__main__': main()
File "/home/developer/ros2_ws/ros2/src/planning_problem/planning_prob_pkg/planning_prob_node.py", line 96, in main
rclpy.spin(minimal_action_server)
File "/opt/ros/crystal/lib/python3.6/site-packages/rclpy/__init__.py", line 119, in spin
executor.spin_once()
File "/opt/ros/crystal/lib/python3.6/site-packages/rclpy/executors.py", line 572, in spin_once
raise handler.exception()
File "/opt/ros/crystal/lib/python3.6/site-packages/rclpy/task.py", line 206, in __call__
self._handler.send(None)
File "/opt/ros/crystal/lib/python3.6/site-packages/rclpy/action/server.py", line 323, in _execute_goal
execute_result = await await_or_execute(execute_callback, goal_handle)
File "/opt/ros/crystal/lib/python3.6/site-packages/rclpy/executors.py", line 92, in await_or_execute
return callback(*args)
File "/home/developer/ros2_ws/ros2/src/planning_problem/planning_prob_pkg/planning_prob_node.py", line 59, in execute_callback
self._publisher.publish(msg)
File "/opt/ros/crystal/lib/python3.6/site-packages/rclpy/publisher.py", line 28, in publish
_rclpy.rclpy_publish(self.publisher_handle, msg)
ValueError: PyCapsule_GetPointer called with invalid PyCapsule object

I`m not very good with ros python interface
But at first glance, is it you create the publisher as bool type
and you are sending it as a string?

Related

blank() got an unexpected keyword argument 'disable' for TextLMDataBunch

I changed the version of Fastai to 1.0.60 to support the functions TextLMDataBunch and
TextClasDataBunch
# Language model data
data_lm = TextLMDataBunch.from_df(train_df = df_trn, valid_df = df_val, path = "")
# Classifier model data
data_clas = TextClasDataBunch.from_df(train_df = df_trn, valid_df = df_val, path = "", vocab=data_lm.train_ds.vocab, bs=32)
and i am getting this error when i run the code:
Traceback (most recent call last):
File "/usr/lib/python3.8/concurrent/futures/process.py", line 239, in \_process_worker
r = call_item.fn(\*call_item.args, \*\*call_item.kwargs)
File "/usr/lib/python3.8/concurrent/futures/process.py", line 198, in \_process_chunk
return \[fn(\*args) for args in chunk\]
File "/usr/lib/python3.8/concurrent/futures/process.py", line 198, in \<listcomp\>
return \[fn(\*args) for args in chunk\]
File "/usr/local/lib/python3.8/dist-packages/fastai/text/transform.py", line 112, in \_process_all_1
tok = self.tok_func(self.lang)
File "/usr/local/lib/python3.8/dist-packages/fastai/text/transform.py", line 25, in __init__
self.tok = spacy.blank(lang, disable=\["parser","tagger","ner"\])
TypeError: blank() got an unexpected keyword argument 'disable'
"""
The above exception was the direct cause of the following exception:
TypeError Traceback (most recent call last)
\<ipython-input-12-6aea115a7181\> in \<module\>
1 # Language model data
\----\> 2 data_lm = TextLMDataBunch.from_df(train_df = df_trn, valid_df = df_val, path = "")
3 # Classifier model data
4 data_clas = TextClasDataBunch.from_df(train_df = df_trn, valid_df = df_val, path = "", vocab=data_lm.train_ds.vocab, bs=32)
15 frames
/usr/local/lib/python3.8/dist-packages/fastai/text/transform.py in __init__()
23 "Wrapper around a spacy tokenizer to make it a `BaseTokenizer`."
24 def __init__(self, lang:str):
\---\> 25 self.tok = spacy.blank(lang, disable=\["parser","tagger","ner"\])
26
27 def tokenizer(self, t:str) -\> List\[str\]:
TypeError: blank() got an unexpected keyword argument 'disable'
the version cannot be updated as if it were updated,the the functions TextLMDataBunch and
TextClasDataBunch are not supported.
What modifications should i do to this code?

Neptune - RuntimeError: Connection was already closed in pythongremlin

I keep getting the following error even after implementing backoff.
connection -> db.py
import os
import logging
import sys
from gremlin_python.driver import serializer
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from tornado.websocket import WebSocketClosedError
from gremlin_python.driver import client
# Exception
reconnectable_err_msgs = [
'ReadOnlyViolationException',
'Server disconnected',
'Connection refused'
]
retriable_err_msgs = ['ConcurrentModificationException'] + reconnectable_err_msgs
network_errors = [WebSocketClosedError, OSError]
retriable_errors = [GremlinServerError] + network_errors
# Connection Retry
def reset_connection_if_connection_issue(params):
is_reconnectable = False
e = sys.exc_info()[1]
err_msg = str(e)
if isinstance(e, tuple(network_errors)):
is_reconnectable = True
else:
is_reconnectable = any(reconnectable_err_msg in err_msg for reconnectable_err_msg in reconnectable_err_msgs)
logging.info("is_reconnectable: {}".format(is_reconnectable))
if is_reconnectable:
global conn
global g
conn.close()
conn = create_remote_connection()
g = create_graph_traversal_source(conn)
def is_retriable_error(e):
is_retriable = False
error_msg = str(e)
if isinstance(e, tuple(network_errors)):
is_retriable = True
else:
is_retriable = any(retriable_err_msg in error_msg for retriable_err_msg in retriable_err_msgs)
print("error: [{}] {}".format(type(e), error_msg))
print("is_retriable: {}".format(is_retriable))
return is_retriable
def is_non_retriable_error(e):
return not is_retriable_error(e)
# Database connection
def create_remote_connection():
logging.info("Creating remote connection")
return DriverRemoteConnection(
connection_string(),
'g',
pool_size=1, # Pool connection is limited to 1
message_serializer=serializer.GraphSONSerializersV2d0()
)
def connection_string():
database_url = 'wss://{}:{}/gremlin'.format(os.environ.get('neptuneEndpoint'), os.environ.get('neptunePort'))
logging.info("Connection String %s", database_url)
return database_url
def create_graph_traversal_source(conn):
logging.info("Connection successful, creating graph traversal")
return traversal().withRemote(conn)
conn = create_remote_connection()
g = create_graph_traversal_source(conn)
def get_cli():
return client.Client(connection_string(), 'g')
Query:-
#backoff.on_exception(backoff.constant, tuple(db.retriable_errors), max_tries=5, jitter=None,
giveup=db.is_non_retriable_error, on_backoff=db.reset_connection_if_connection_issue, interval=1)
def is_user_available(event):
logging.debug("check the user from Neptune")
return db.g.V(cognito_username).hasNext()
Error Message:
Connection was already closed.: RuntimeError
Traceback (most recent call last):
File "/var/task/backoff/_sync.py", line 94, in retry
ret = target(*args, **kwargs)
File "/var/task/chalice/app.py", line 1605, in __call__
return self.handler(event_obj)
File "/var/task/chalice/app.py", line 1558, in __call__
return self._original_func(event.to_dict(), event.context)
File "/var/task/app.py", line 18, in poll_role_handler
return queries(event, context)
File "/var/task/app.py", line 35, in queries
return query.list_poll_role(event, db.g)
File "/var/task/chalicelib/query.py", line 23, in list_poll_role
.dedup().hasLabel('user').count().next()
File "/var/task/gremlin_python/process/traversal.py", line 88, in next
return self.__next__()
File "/var/task/gremlin_python/process/traversal.py", line 47, in __next__
self.traversal_strategies.apply_strategies(self)
File "/var/task/gremlin_python/process/traversal.py", line 548, in apply_strategies
traversal_strategy.apply(traversal)
File "/var/task/gremlin_python/driver/remote_connection.py", line 63, in apply
remote_traversal = self.remote_connection.submit(traversal.bytecode)
File "/var/task/gremlin_python/driver/driver_remote_connection.py", line 60, in submit
results = result_set.all().result()
File "/var/lang/lib/python3.6/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/var/lang/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/var/task/gremlin_python/driver/resultset.py", line 90, in cb
f.result()
File "/var/lang/lib/python3.6/concurrent/futures/_base.py", line 425, in result
return self.__get_result()
File "/var/lang/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/var/lang/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "/var/task/gremlin_python/driver/connection.py", line 82, in _receive
data = self._transport.read()
File "/var/task/gremlin_python/driver/aiohttp/transport.py", line 104, in read
raise RuntimeError("Connection was already closed.")
RuntimeError: Connection was already closed.
Connection was already closed.: RuntimeError Traceback (most recent call last): File "/var/task/backoff/_sync.py", line 94, in retry ret = target(*args, **kwargs) File "/var/task/chalice/app.py", line 1605, in __call__ return self.handler(event_obj) File "/var/task/chalice/app.py", line 1558, in __call__ return self._original_func(event.to_dict(), event.context) File "/var/task/app.py", line 18, in poll_role_handler return queries(event, context) File "/var/task/app.py", line 35, in queries return query.list_poll_role(event, db.g) File "/var/task/chalicelib/query.py", line 23, in list_poll_role .dedup().hasLabel('user').count().next() File "/var/task/gremlin_python/process/traversal.py", line 88, in next return self.__next__() File "/var/task/gremlin_python/process/traversal.py", line 47, in __next__ self.traversal_strategies.apply_strategies(self) File "/var/task/gremlin_python/process/traversal.py", line 548, in apply_strategies traversal_strategy.apply(traversal) File "/var/task/gremlin_python/driver/remote_connection.py", line 63, in apply remote_traversal = self.remote_connection.submit(traversal.bytecode) File "/var/task/gremlin_python/driver/driver_remote_connection.py", line 60, in submit results = result_set.all().result() File "/var/lang/lib/python3.6/concurrent/futures/_base.py", line 432, in result return self.__get_result() File "/var/lang/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result raise self._exception File "/var/task/gremlin_python/driver/resultset.py", line 90, in cb f.result() File "/var/lang/lib/python3.6/concurrent/futures/_base.py", line 425, in result return self.__get_result() File "/var/lang/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result raise self._exception File "/var/lang/lib/python3.6/concurrent/futures/thread.py", line 56, in run result = self.fn(*self.args, **self.kwargs) File "/var/task/gremlin_python/driver/connection.py", line 82, in _receive data = self._transport.read() File "/var/task/gremlin_python/driver/aiohttp/transport.py", line 104, in read raise RuntimeError("Connection was already closed.") RuntimeError: Connection was already closed.
How to fix this connection issue?
Do you see any of the logging in is_retriable_error? If you do, then we know that this logic at least is being triggered, and can probably add 'Connection was already closed.' as a reconnectable error message. If not, try:
reconnectable_err_msgs = [
'ReadOnlyViolationException',
'Server disconnected',
'Connection refused',
'Connection was already closed.'
]
retriable_err_msgs = ['ConcurrentModificationException'] + reconnectable_err_msgs
network_errors = [WebSocketClosedError, OSError]
retriable_errors = [GremlinServerError, RuntimeError] + network_errors
Here I've added not only the error message to the reconnectable error messages, but RuntimeError to the retriable errors.
If this doesn't work then the issue then it will likely require a new reproducer for the AIOHTTP transport (the guidance was originally written for the Tornado transport).

Flask reqparser.parse_args with docker passes an ellipsis object - Error

an issue has been plaguing me recently, being that when I attempt to make a post request with Postman to a service hosted on docker I receive a TypeError:
File "/usr/local/lib/python3.9/site-packages/werkzeug/datastructures.py", line 554, in update
for key, value in iter_multi_items(mapping):
TypeError: cannot unpack non-iterable ellipsis object
This occurs when this function body is run on a POST from Postman:
def post(self):
from flask_restful import reqparse
parser = reqparse.RequestParser()
parser.add_argument('rate', type=int, help='Rate cannot be converted')
parser.add_argument('name')
args = parser.parse_args()
The JSON body passed in the POST request is:
{
"rate" : 1,
"name" : "Test"
}
I've located the issue with Docker as this only occurs when the service is hosted with docker, if it is hosted locally then no issue occurs.
-- Edit
To give more context, my init file looks like this, the error occurring when the POST function is called in postman.
from flask import Flask, g
from flask_restful import Resource, Api, reqparse
import os
import shelve
app = Flask(__name__)
api = Api(app)
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = shelve.open("rates.db")
return db
#app.teardown_appcontext
def teardown_db(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
#app.route("/")
def index():
return "Hello World"
class RateList(Resource):
def get(self):
shelf = get_db()
keys = list(shelf.keys())
devices = []
for key in keys:
devices.append(shelf[key])
return {'message' : 'Success', 'data' : devices}, 200
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('rate', type=int, help='Rate cannot be converted')
parser.add_argument('name')
args = parser.parse_args()
shelf = get_db()
shelf[args['rate']] = args
return {'message' : 'Rate registered', 'data' : args}, 201
class Rate(Resource):
def get(self, rate):
shelf = get_db()
if not(rate in shelf):
return {'message' : 'Rate not found', 'data' : {}}, 404
return {'message' : 'Rate found', 'data' : shelf[rate]}, 200
def delete(self, rate):
shelf = get_db()
if not(rate in shelf):
return {'message' : 'Rate not found', 'data' : {}}, 404
del shelf[rate]
return {'message' : 'Rate deleted', 'data' : {}}, 200
api.add_resource(RateList, '/Rate')
api.add_resource(Rate, '/Rate/<string:rate>')
Docker-compose file:
version: '3.4'
services:
stock-registry:
build: .
volumes:
- .:/usr/src/app
ports:
- 5001:80
Dockerfile:
FROM python:3
WORKDIR /usr/src/app
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
CMD ["python", "./run.py"]
Run.py:
from stock_registry import app
app.run(host='0.0.0.0', port=80, debug=True)
The complete error log that shows when this is attempted:
172.22.0.1 - - [30/Jun/2021 12:37:12] "POST /shares HTTP/1.1" 500 -
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1997, in __call__
return self.wsgi_app(environ, start_response)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1985, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python3.9/site-packages/flask_restful/__init__.py", line 265, in error_router
return original_handler(e)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.9/site-packages/flask/_compat.py", line 32, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.9/site-packages/flask_restful/__init__.py", line 265, in error_router
return original_handler(e)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.9/site-packages/flask/_compat.py", line 32, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.9/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.9/site-packages/flask_restful/__init__.py", line 446, in wrapper
resp = resource(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/flask_restful/__init__.py", line 550, in dispatch_request
resp = meth(*args, **kwargs)
File "/usr/src/app/stock_registry/__init__.py", line 65, in post
args = parser.parse_args()
File "/usr/local/lib/python3.9/site-packages/flask_restful/reqparse.py", line 261, in parse_args
value, found = arg.parse(req)
File "/usr/local/lib/python3.9/site-packages/flask_restful/reqparse.py", line 143, in parse
source = self.source(request)
File "/usr/local/lib/python3.9/site-packages/flask_restful/reqparse.py", line 101, in source
values.update(value)
File "/usr/local/lib/python3.9/site-packages/werkzeug/datastructures.py", line 554, in update
for key, value in iter_multi_items(mapping):
TypeError: cannot unpack non-iterable ellipsis object

ObjectModel accessing incoming relations

I have two nodes A and B. They have a directed relation from A to B.
Thus, A has a ConnectedTo attributed of type RelatedTo. However, I want to iterate over all B nodes and access the incoming relations from A.
How can I do this?
I tried adding a ConnectedTo attribute of type RelatedFrom to B but when querying the graph I get a ValueError('Invalid Identifier').
class A(GraphObject):
__primarykey__ = "hash"
hash = Property()
ConnectedTo = RelatedTo('B')
def __init__(self, hash):
self.hash = hash
class B(GraphObject):
__primarykey__ = "hash"
hash = Property()
ConnectedTo = RelatedFrom('A')
def __init__(self, hash):
self.hash = hash
>>> a = A("testA")
>>> b = B("testB")
>>> a.ConnectedTo.add(b)
>>> graph.push(a)
>>> graph.push(b)
>>> test = B.select(graph).first()
Results in error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/py2neo/ogm.py", line 442, in first
return self._object_class.wrap(super(GraphObjectSelection, self).first())
File "/usr/local/lib/python2.7/dist-packages/py2neo/ogm.py", line 344, in wrap
_ = getattr(inst, attr)
File "/usr/local/lib/python2.7/dist-packages/py2neo/ogm.py", line 90, in __get__
related[key] = RelatedObjects(cog.node, self.direction, self.relationship_type, self.related_class)
File "/usr/local/lib/python2.7/dist-packages/py2neo/ogm.py", line 135, in __init__
self.__relationship_pattern = "(a)<-[_:%s]-(b)" % cypher_escape(relationship_type)
File "/usr/local/lib/python2.7/dist-packages/py2neo/database/cypher.py", line 221, in cypher_escape
writer.write_identifier(identifier)
File "/usr/local/lib/python2.7/dist-packages/py2neo/database/cypher.py", line 78, in write_identifier
raise ValueError("Invalid identifier")
ValueError: Invalid identifier
The solution was easier than expected:
class TestA(GraphObject):
__primarykey__ = "hash"
hash = Property()
CONNECTEDTO = RelatedTo('TestB')
def __init__(self, hash):
self.hash = hash
class TestB(GraphObject):
__primarykey__ = "hash"
hash = Property()
CONNECTEDTO = RelatedFrom('TestA', "CONNECTEDTO")
def __init__(self, hash):
self.hash = hash
>>> a = A("testA")
>>> b = B("testB")
>>> a.ConnectedTo.add(b)
>>> graph.push(a)
>>> graph.push(b)
>>> test = B.select(graph).first()
>>> list(test.CONNECTEDTO)
[ TestA ]
The important part is RelatedFrom('TestA','CONNECTEDTO'). You have to specify what the incoming connection is called.

Tweepy (twitter) socket.error Errno 104 (Connection reset by peer)

I am trying to acces the Streaming API, filter it by some terms and then print out the results, using Tweepy. However I am getting the following error:
File "/usr/local/lib/python2.6/dist-packages/tweepy-1.7.1-py2.6.egg/tweepy/streaming.py", line 110, in _run
resp = conn.getresponse()
File "/usr/lib/python2.6/httplib.py", line 986, in getresponse
response.begin()
File "/usr/lib/python2.6/httplib.py", line 391, in begin
version, status, reason = self._read_status()
File "/usr/lib/python2.6/httplib.py", line 349, in _read_status
line = self.fp.readline()
File "/usr/lib/python2.6/socket.py", line 397, in readline
data = recv(1)
socket.error: [Errno 104] Connection reset by peer
With the following code...
import sys
import tweepy
from textwrap import TextWrapper
from tweepy.streaming import StreamListener, Stream
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth1 = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth1.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth1)
class StreamListener(tweepy.StreamListener):
status_wrapper = TextWrapper(width=60, initial_indent=' ', subsequent_indent=' ')
def on_status(self, status):
try:
print self.status_wrapper.fill(status.text)
print '\n %s %s via %s\n' % (status.author.screen_name, status.created_at, status.source)
except Exception, e:
pass
def main():
l = StreamListener()
streamer = tweepy.Stream(auth=auth1, listener=l, timeout=3000000000 )
setTerms = ['hello', 'goodbye', 'goodnight', 'good morning']
streamer.filter(None,setTerms)
if __name__ == "__main__":
main()
Does anyone know how to solve it?
Thanks...
The reason was SSL, it seems to be forced by twitter now...

Resources