Invoking "make cmake_check_build_system" failed - ros

```
File "/opt/ros/melodic/lib/python2.7/dist-packages/genmsg/msg_loader.py", line 266, in load_msg_from_string
field_type, name = _load_field_line(orig_line, package_name)
File "/opt/ros/melodic/lib/python2.7/dist-packages/genmsg/msg_loader.py", line 225, in _load_field_line
raise InvalidMsgSpec("Invalid declaration: %s"%(orig_line))
genmsg.base.InvalidMsgSpec: Invalid declaration: ...
CMake Error at /opt/ros/melodic/share/catkin/cmake/safe_execute_process.cmake:11 (message):
execute_process(/home/akash/catkin_ws/build/catkin_generated/env_cached.sh
"/usr/bin/python2" "/usr/bin/empy" "--raw-errors" "-F"
"/home/akash/catkin_ws/build/service_node/cmake/service_node-genmsg-context.py"
"-o"
"/home/akash/catkin_ws/build/service_node/cmake/service_node-genmsg.cmake"
"/opt/ros/melodic/share/genmsg/cmake/pkg-genmsg.cmake.em") returned error
code 1
Call Stack (most recent call first):
/opt/ros/melodic/share/catkin/cmake/em_expand.cmake:25 (safe_execute_process)
/opt/ros/melodic/share/genmsg/cmake/genmsg-extras.cmake:303 (em_expand)
service_node/CMakeLists.txt:71 (generate_messages)
-- Configuring incomplete, errors occurred!
See also "/home/akash/catkin_ws/build/CMakeFiles/CMakeOutput.log".
See also "/home/akash/catkin_ws/build/CMakeFiles/CMakeError.log".
Makefile:404: recipe for target 'cmake_check_build_system' failed
make: *** [cmake_check_build_system] Error 1
Invoking "make cmake_check_build_system" failed
I am new to ROS and I am following a tutorial. I created a package named service_node, and inside it I created a srv named AddTwoInts.srv:
```
int64 a
int64 b
...
int64 sum
```
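For reference, the AddTwoInts.srv in the official ROS tutorial separates the request fields from the response fields with a line of exactly three dashes; any other separator line is parsed as a field declaration, which matches the "Invalid declaration: ..." error above:
```
int64 a
int64 b
---
int64 sum
```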
The tutorial then asked me to create a script named client.py:
```
#!/usr/bin/env python
from __future__ import print_function

import sys
import rospy
from service_node.srv import *

def add_two_ints_client(x, y):
    rospy.wait_for_service('add_two_ints')
    try:
        add_two_ints = rospy.ServiceProxy('add_two_ints', AddTwoInts)
        resp1 = add_two_ints(x, y)
        return resp1.sum
    except rospy.ServiceException as e:
        print("Service call failed: %s"%e)

def usage():
    return "%s [x y]"%sys.argv[0]

if __name__ == "__main__":
    if len(sys.argv) == 3:
        x = int(sys.argv[1])
        y = int(sys.argv[2])
    else:
        print(usage())
        sys.exit(1)
    print("Requesting %s+%s"%(x, y))
    print("%s + %s = %s"%(x, y, add_two_ints_client(x, y)))
```
and a server.py:
```
#!/usr/bin/env python
from __future__ import print_function

import rospy
from service_node.srv import AddTwoInts, AddTwoIntsResponse

def handle_add_two_ints(req):
    print("Returning [%s + %s = %s]"%(req.a, req.b, (req.a + req.b)))
    return AddTwoIntsResponse(req.a + req.b)

def add_two_ints_server():
    rospy.init_node('add_two_ints_server')
    s = rospy.Service('add_two_ints', AddTwoInts, handle_add_two_ints)
    print("Ready to add two ints.")
    rospy.spin()

if __name__ == "__main__":
    add_two_ints_server()
```
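For context, once the workspace builds, the tutorial runs these scripts with rosrun, roughly like this (4 and 5 are placeholder arguments):
```
rosrun service_node server.py
rosrun service_node client.py 4 5
```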
Then I made some changes to CMakeLists.txt, such as uncommenting add_service_files:
```
add_service_files(
  FILES
  AddTwoInts.srv
  # Service2.srv
)

generate_messages(
  DEPENDENCIES
  std_msgs
)

find_package(catkin REQUIRED COMPONENTS
  message_generation
  rospy
  std_msgs
)

catkin_install_python(PROGRAMS
  scripts/server.py scripts/client.py
  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
```
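For reference, catkin is order-sensitive here: find_package(catkin REQUIRED COMPONENTS ... message_generation) must appear before add_service_files, which must appear before generate_messages. A minimal ordering sketch (the catkin_package line is an assumption based on the standard template, since the question does not show it):
```
find_package(catkin REQUIRED COMPONENTS message_generation rospy std_msgs)
add_service_files(FILES AddTwoInts.srv)
generate_messages(DEPENDENCIES std_msgs)
catkin_package(CATKIN_DEPENDS message_runtime rospy std_msgs)
```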
I also edited the package.xml and uncommented the message_runtime dependency.
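For reference, the dependency lines the tutorial has you uncomment in package.xml normally look like this:
```
<build_depend>message_generation</build_depend>
<exec_depend>message_runtime</exec_depend>
```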
When I try to run catkin_make, I get the error shown above and I don't know why.

Related

My Python script does not publish velocity commands to a Parrot drone in ROS

I would like to publish velocities for my ARDrone on the /cmd_vel topic using the Python script below, but it does nothing: it does not publish the required information.
What is wrong with the code below?
```
#!/usr/bin/env python3
import numpy as np
import rospy
from geometry_msgs.msg import Twist

class KeyboardControl:
    def __init__(self):
        rospy.init_node('Script_controlling_ARDrone', anonymous=False)
        publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        twist = Twist()
        twist.linear.x = 0
        twist.linear.y = 0
        twist.linear.z = -1
        twist.angular.x = 0
        twist.angular.y = 0
        twist.angular.z = 0.5
        publisher.publish(twist)

def main():
    try:
        kc = KeyboardControl()
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo("Shutting down")

if __name__ == '__main__':
    main()
```
This does actually do something, but it's probably not noticeable, because it only publishes once, when the object is created. If you would like to publish periodically, you should use a main run loop like so:
```
import numpy as np
import rospy
from geometry_msgs.msg import Twist

class KeyboardControl:
    def __init__(self):
        rospy.init_node('Script_controlling_ARDrone', anonymous=False)
        self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.rate = rospy.Rate(10)  # 10 Hz

def main():
    try:
        kc = KeyboardControl()
        twist = Twist()
        twist.linear.x = 0
        twist.linear.y = 0
        twist.linear.z = -1
        twist.angular.x = 0
        twist.angular.y = 0
        twist.angular.z = 0.5
        while not rospy.is_shutdown():
            # Do any needed edits to the twist message here
            kc.pub.publish(twist)
            kc.rate.sleep()
    except KeyboardInterrupt:
        rospy.loginfo("Shutting down")

if __name__ == '__main__':
    main()
```
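An alternative to the explicit while loop (a sketch along the same lines, not part of the original answer) is to let rospy.Timer fire the publish callback at 10 Hz:
```
import rospy
from geometry_msgs.msg import Twist

rospy.init_node('Script_controlling_ARDrone', anonymous=False)
pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)

twist = Twist()
twist.linear.z = -1
twist.angular.z = 0.5

# rospy.Timer invokes the callback every 0.1 s until the node shuts down.
rospy.Timer(rospy.Duration(0.1), lambda event: pub.publish(twist))
rospy.spin()
```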

Apache Beam - Multiple PCollections - DataframeTransform Issue

I am running the following sample in Apache Beam:
```
import apache_beam as beam
from apache_beam import Row
from apache_beam import Pipeline
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import WorkerOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.dataframe.convert import to_dataframe, to_pcollection
from apache_beam.dataframe.transforms import DataframeTransform
import logging
import argparse
import sys
import pandas

logging.getLogger().setLevel(logging.INFO)

parser = argparse.ArgumentParser()
known_args, pipeline_args = parser.parse_known_args(sys.argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True

# Just a dummy dataframe transform function, ignore the logic
def transformdf(a, b):
    a["addr"] = "addr-common"
    return a

p = beam.Pipeline(options=pipeline_options)

# Schema-aware PCollections
data1 = [Row(id=1, name="abc"), Row(id=2, name="def"), Row(id=3, name="ghi")]
pcol1 = (p | "Create1" >> beam.Create(data1))
data2 = [Row(addr="addr1"), Row(addr="addr2"), Row(addr="addr3")]
pcol2 = (p | "Create2" >> beam.Create(data2))

pcol = ({"a": pcol1, "b": pcol2} | "TransformedDF" >> DataframeTransform(transformdf))
# The line above throws the duplicate label error

pcol | "Map" >> beam.Map(lambda row: {"id": row.id, "name": row.name, "addr": row.addr}) | "Print" >> beam.Map(print)

p.run().wait_until_finish()
```
The code errors out with:
```
RuntimeError: A transform with label "TransformedDF/BatchElements(pc)" already exists in the pipeline
```
The syntax and usage seem correct per https://beam.apache.org/documentation/dsls/dataframes/overview/#embedding-dataframes-in-a-pipeline:
```
output = {"a": pcol1, "b": pcol2} | DataframeTransform(lambda/function)
```
I am currently using Apache Beam 2.35.0.
Is this an issue with the Python SDK?
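One possible workaround, sketched under the assumption that the goal is simply to apply transformdf to both schema-aware PCollections, is to skip DataframeTransform and use the to_dataframe/to_pcollection conversion API instead (this is not a confirmed SDK fix; both helpers are already imported in the sample above):
```
# Convert each schema-aware PCollection to a deferred dataframe,
df_a = to_dataframe(pcol1)
df_b = to_dataframe(pcol2)

# apply the same pandas-style logic to the deferred frames,
result_df = transformdf(df_a, df_b)

# and convert the result back into a PCollection of Rows.
result = to_pcollection(result_df)
```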

Pytorch N-Beats model throwing error: 'str' object has no attribute '__name__'

I'm trying to replicate the N-Beats model from pytorch-forecasting in Colab. I copied the code from https://pytorch-forecasting.readthedocs.io/en/stable/tutorials/ar.html into a Colab notebook, and an error shows up in the training cell.
```
import os
import warnings

warnings.filterwarnings("ignore")
os.chdir("../../..")

import pandas as pd
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
import torch

from pytorch_forecasting import Baseline, NBeats, TimeSeriesDataSet
from pytorch_forecasting.data import NaNLabelEncoder
from pytorch_forecasting.data.examples import generate_ar_data
from pytorch_forecasting.metrics import SMAPE

data = generate_ar_data(seasonality=10.0, timesteps=400, n_series=100, seed=42)
data["static"] = 2
data["date"] = pd.Timestamp("2020-01-01") + pd.to_timedelta(data.time_idx, "D")
data.head()

# create dataset and dataloaders
max_encoder_length = 60
max_prediction_length = 20

training_cutoff = data["time_idx"].max() - max_prediction_length

context_length = max_encoder_length
prediction_length = max_prediction_length

training = TimeSeriesDataSet(
    data[lambda x: x.time_idx <= training_cutoff],
    time_idx="time_idx",
    target="value",
    categorical_encoders={"series": NaNLabelEncoder().fit(data.series)},
    group_ids=["series"],
    # only unknown variable is "value" - and N-Beats can also not take any additional variables
    time_varying_unknown_reals=["value"],
    max_encoder_length=context_length,
    max_prediction_length=prediction_length,
)

validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff + 1)
batch_size = 128
train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)
```
Error is:
```
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-67-db4b0ef13391> in <module>()
     25     net,
     26     train_dataloader=train_dataloader,
---> 27     val_dataloaders=val_dataloader,
     28 )

30 frames
/usr/local/lib/python3.7/dist-packages/yaml/representer.py in represent_object(self, data)
    329         if dictitems is not None:
    330             dictitems = dict(dictitems)
--> 331         if function.__name__ == '__newobj__':
    332             function = args[0]
    333             args = args[1:]

AttributeError: 'str' object has no attribute '__name__'
```
Downgrading pytorch-lightning from 1.2.1 to 1.1.8 solved it for me.
I ran into a similar issue recently and found that downgrading pandas to 1.2.5 resolved it.
The answer from @PVJ worked for me. For the sake of completeness, you can downgrade pytorch_lightning through:
```
pip install --upgrade pytorch_lightning==1.1.8
```
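If the pandas version is also a factor, as the earlier comment suggests, both pins can be combined into one install (the exact versions are assumptions that depend on your environment):
```
pip install "pytorch-lightning==1.1.8" "pandas==1.2.5"
```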

Isolating the topk routine of dask

I am trying to isolate the topk routine from dask, but somehow it dies in isolation.
Apparently, a numpy array instead of a dask array is passed to the x argument during the recursion.
The original source code for topk is at: https://github.com/dask/dask/blob/master/dask/array/routines.py
Test program:
```
import numpy as np
import dask.array as da
from dask.base import tokenize
from operator import getitem
import dask.sharedict as sharedict
from dask.array.core import Array

def topk(k, x):
    if x.ndim != 1:
        raise ValueError("Topk only works on arrays of one dimension")
    token = tokenize(k, x)
    name = 'chunk.topk-' + token
    dsk = {(name, i): (topk, k, key)
           for i, key in enumerate(x.__dask_keys__())}
    name2 = 'topk-' + token
    dsk[(name2, 0)] = (getitem, (np.sort, (np.concatenate, list(dsk))),
                       slice(-1, -k - 1, -1))
    chunks = ((k,),)
    return Array(sharedict.merge((name2, dsk), x.dask), name2, chunks, dtype=x.dtype)

def main():
    x = np.arange(12) * 8
    y = da.from_array(x, 7)
    print(y.topk(2).compute())
    print(topk(2, y).compute())

main()
```
Error:
File "test_dask_argtopk.py", line 40, in <module>
main()
File "test_dask_argtopk.py", line 38, in main
print(topk(2, y).compute())
File "test_dask_argtopk.py", line 27, in topk
for i, key in enumerate(x.__dask_keys__())}
AttributeError: 'Array' object has no attribute '__dask_keys__'
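For comparison, recent dask releases expose top-k selection directly as da.topk; a minimal sketch, assuming a current dask where the public signature is topk(a, k, axis=-1) (older releases used topk(k, a), the form the hand-rolled code above mirrors):
```
import numpy as np
import dask.array as da

x = np.arange(12) * 8          # values 0, 8, ..., 88
y = da.from_array(x, chunks=7)

# Top-2 values, largest first: [88 80]
print(da.topk(y, 2).compute())
```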

OpenCL Theano - How to forcefully disable CUDA?

After a series of pains, I have installed Theano on a machine with an AMD graphics card - a Radeon HD 5450 (Cedar).
Now, consider the following code.
```
import numpy
import theano
import theano.tensor as T

rng = numpy.random

N = 400        # number of samples
feats = 784    # dimensionality of features
D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2))
training_steps = 10000

# theano symbolic variables
x = T.matrix("x")
y = T.vector("y")
w = theano.shared(rng.randn(784), name="w")
b = theano.shared(0., name="b")

print("Initial Model:")
print(str(w.get_value()) + " " + str(b.get_value()))

p_1 = 1/(1 + T.exp(-T.dot(x, w) - b))        # probability of target being 1
prediction = p_1 > 0.5                       # prediction threshold
xent = -y * T.log(p_1) - (1-y)*T.log(1-p_1)  # cross-entropy loss function
cost = xent.mean() + 0.01 * (w**2).sum()     # cost - to be minimized
gw, gb = T.grad(cost, [w, b])

# compile it
train = theano.function(
    inputs=[x, y],
    outputs=[prediction, xent],
    updates={w: w - 0.1*gw, b: b - 0.1*gb})
predict = theano.function(inputs=[x], outputs=prediction)

# train it
for i in range(training_steps):
    pred, err = train(D[0], D[1])

print("Final Model: ")
print(str(w.get_value()) + " " + str(b.get_value()))
print("Target values for D: " + str(D[1]))
print("Predictions on D: " + str(D[0]))
```
I think this code should work just fine. But I get a series of errors:
```
ERROR (theano.gof.opt): Optimization failure due to: local_gpua_hgemm
ERROR (theano.gof.opt): node: dot(x.T, Elemwise{sub,no_inplace}.0)
ERROR (theano.gof.opt): TRACEBACK:
ERROR (theano.gof.opt): Traceback (most recent call last):
  File "/home/user/anaconda3/lib/python3.5/site-packages/theano/gof/opt.py", line 1772, in process_node
    replacements = lopt.transform(node)
  File "/home/user/anaconda3/lib/python3.5/site-packages/theano/sandbox/gpuarray/opt.py", line 140, in local_opt
    new_op = maker(node, context_name)
  File "/home/user/anaconda3/lib/python3.5/site-packages/theano/sandbox/gpuarray/opt.py", line 732, in local_gpua_hgemm
    if nvcc_compiler.nvcc_version < '7.5':
TypeError: unorderable types: NoneType() < str()
```
And I get the same set of messages multiple times. Then at the end:
File "/home/user/anaconda3/lib/python3.5/site-packages/pygpu-0.2.1-py3.5-linux-x86_64.egg/pygpu/elemwise.py", line 286, in __init__
**self.flags)
File "pygpu/gpuarray.pyx", line 1950, in pygpu.gpuarray.GpuKernel.__cinit__ (pygpu/gpuarray.c:24214)
File "pygpu/gpuarray.pyx", line 467, in pygpu.gpuarray.kernel_init (pygpu/gpuarray.c:7174)
pygpu.gpuarray.UnsupportedException: ('The following error happened while compiling the node', GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>(GpuFromHost<None>.0, InplaceGpuDimShuffle{x}.0), '\n', b'Device does not support operation')
Does this mean I cannot use this GPU, or have I done something wrong in my code? Moreover, from the errors, it seems there has been a search for nvcc, but I do not have CUDA; I have OpenCL.
```
>>> import theano
Mapped name None to device opencl0:0: Cedar
```
also:
```
>>> from theano import config
>>> config.device
'opencl0:0'
>>> config.cuda
<theano.configparser.AddConfigVar.<locals>.SubObj object at 0x7fba9dee7d30>
>>> config.nvcc
<theano.configparser.AddConfigVar.<locals>.SubObj object at 0x7fba9e5967f0>
>>> config.gpu
<theano.configparser.AddConfigVar.<locals>.SubObj object at 0x7fbaa9f61828>
```
So where do I go from here? Is there a way to make sure clcc is searched for instead of nvcc?
PS_1: "hello world" works.
PS_2: System = 14.04, 64-bit
OpenCL is not yet supported by Theano; as a result, only NVIDIA GPUs are supported. The status of OpenCL support is tracked on GitHub.
You need to disable GPU operation by setting device=cpu in your Theano config. There are multiple ways to do this (e.g. via the THEANO_FLAGS environment variable or via a .theanorc file; see the documentation).
Before running the script, try setting:
```
export THEANO_FLAGS=device=cpu,floatX=float64
```
Your situation may need additional configuration options; see the documentation for more.
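For the .theanorc route mentioned above, an equivalent configuration would be (assuming the file is created in your home directory as ~/.theanorc):
```
[global]
device = cpu
floatX = float64
```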
