How to split LaserScan data from a lidar into sections and view them in RViz - ROS

I am trying to split the laser scan range data into subcategories and publish each category on a different laser topic.
To be more specific: the script should take one topic as input - /scan - and publish three topics: scan1, scan2, and scan3.
Is there a way to split the laser scan, publish the parts back, and view them in RViz?
I tried the following:
def callback(laser):
    current_time = rospy.Time.now()

    # Split the 864 readings into three sections of 288 each
    regions["l_f_fork"] = laser.ranges[0:288]
    regions["l_f_s"] = laser.ranges[288:576]
    regions["stand"] = laser.ranges[576:864]

    l.header.stamp = current_time
    l.header.frame_id = 'laser'
    l.angle_min = 0
    l.angle_max = 1.57
    l.angle_increment = 0
    l.time_increment = 0
    l.range_min = 0.0
    l.range_max = 100.0
    l.ranges = regions["l_f_fork"]
    l.intensities = [0]
    left_fork.publish(l)
    # l.ranges = regions["l_f_s"]
    # left_side.publish(l)
    # l.ranges = regions["stand"]
    # left_side.publish(l)
    rospy.loginfo("publishing new info")
I can see the different topics in RViz, but they all lie along the same line.
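A likely cause of that symptom: every published slice reuses the same angle_min (0) and angle_max (1.57), so RViz draws all three 288-point sections over the same arc. A minimal sketch of per-section angles, reusing the names from the snippet above (the third publisher, stand_pub, is hypothetical):

def callback(laser):
    # One LaserScan per section, each with its own angular window,
    # so the three topics no longer overlap in RViz.
    section = 288
    publishers = [left_fork, left_side, stand_pub]  # stand_pub: hypothetical third publisher
    for i, pub in enumerate(publishers):
        part = LaserScan()
        part.header.stamp = rospy.Time.now()
        part.header.frame_id = laser.header.frame_id
        part.angle_increment = laser.angle_increment
        part.angle_min = laser.angle_min + i * section * laser.angle_increment
        part.angle_max = part.angle_min + (section - 1) * laser.angle_increment
        part.time_increment = laser.time_increment
        part.range_min = laser.range_min
        part.range_max = laser.range_max
        part.ranges = laser.ranges[i * section : (i + 1) * section]
        pub.publish(part)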

Tutorial
The following code splits the LaserScan data into three equal sections:
#! /usr/bin/env python3
"""
Program to split a LaserScan into three parts.
"""
import rospy
from sensor_msgs.msg import LaserScan


class LaserScanSplit():
    """
    Class for splitting a LaserScan into three parts.
    """
    def __init__(self):
        self.update_rate = 50
        self.freq = 1./self.update_rate

        # Initialize variables
        self.scan_data = None

        # Subscribers
        rospy.Subscriber("/scan", LaserScan, self.lidar_callback)

        # Publishers
        self.pub1 = rospy.Publisher('/scan1', LaserScan, queue_size=10)
        self.pub2 = rospy.Publisher('/scan2', LaserScan, queue_size=10)
        self.pub3 = rospy.Publisher('/scan3', LaserScan, queue_size=10)

        # Timers
        rospy.Timer(rospy.Duration(self.freq), self.laserscan_split_update)

    def lidar_callback(self, msg):
        """
        Callback function for the scan topic
        """
        self.scan_data = msg

    def laserscan_split_update(self, event):
        """
        Function to update the split scan topics
        """
        if self.scan_data is None:  # no scan received yet
            return

        scan1 = LaserScan()
        scan2 = LaserScan()
        scan3 = LaserScan()

        # Copy the metadata of the original scan into all three parts
        for scan in (scan1, scan2, scan3):
            scan.header = self.scan_data.header
            scan.angle_min = self.scan_data.angle_min
            scan.angle_max = self.scan_data.angle_max
            scan.angle_increment = self.scan_data.angle_increment
            scan.time_increment = self.scan_data.time_increment
            scan.scan_time = self.scan_data.scan_time
            scan.range_min = self.scan_data.range_min
            scan.range_max = self.scan_data.range_max

        # LiDAR range: fill each output with inf, then copy in one third
        n = len(self.scan_data.ranges)
        scan1.ranges = [float('inf')] * n
        scan2.ranges = [float('inf')] * n
        scan3.ranges = [float('inf')] * n

        # Splitting block [three equal parts]
        scan1.ranges[0 : n//3] = self.scan_data.ranges[0 : n//3]
        scan2.ranges[n//3 : 2*n//3] = self.scan_data.ranges[n//3 : 2*n//3]
        scan3.ranges[2*n//3 : n] = self.scan_data.ranges[2*n//3 : n]

        # Publish the split LaserScans
        self.pub1.publish(scan1)
        self.pub2.publish(scan2)
        self.pub3.publish(scan3)

    def kill_node(self):
        """
        Function to kill the ROS node
        """
        rospy.signal_shutdown("Done")


if __name__ == '__main__':
    rospy.init_node('laserscan_split_node')
    LaserScanSplit()
    rospy.spin()
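A note on the design choice here: instead of shrinking angle_min/angle_max per section, this node keeps the full-length ranges array and masks the other two thirds with inf. Values outside [range_min, range_max] are not rendered, so each topic shows only its own sector in RViz while all three keep the original scan geometry, which is why the sections no longer collapse onto one line. A quick offline sanity check of the splitting block (plain Python, hypothetical values):

n = 9
ranges = list(range(n))            # stand-in for self.scan_data.ranges
scan1_ranges = [float('inf')] * n
scan1_ranges[0 : n//3] = ranges[0 : n//3]
print(scan1_ranges)                # [0, 1, 2, inf, inf, inf, inf, inf, inf]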
[Screenshots of the robot and obstacles in the environment in Gazebo and RViz]
References:
ROS1 Python Boilerplate
atreus

Related

How to print the value of lineEdit?

class MyApp(QMainWindow, MainUI):
    def __init__(self):
        super(MyApp, self).__init__()
        QMainWindow.__init__(self,)
        self.setupUi(self)
        self._generator = None
        self._timerId = None
        sys.stdout = EmittingStream(textWritten=self.normalOutputWritten)
        self.sinais_usados = []
        self.lucro = 0
        self.filtro = []
        self.Exp = []
        self.valor_Ad = float(self.lineEdit_valueEntry.text())
        self.pushButton_4.clicked.connect(self.Login)
        self.pushButton_7.clicked.connect(self.getFiles)
        # self.check_porcentagem.stateChanged.connect(self.setarPorcentagem)
        self.pushButton_3.clicked.connect(self.start)
        self.pushButton.clicked.connect(self.sdd)
        self.show()
        # self.lineEdit_valueEntry = QLineEdit( float(self.lineEdit_valueEntry.text()))

    def sdd(self):
        valor_entrada = self.valor_Ad
        print(valor_entrada)
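A likely cause, assuming the goal is to print what the user has typed: self.valor_Ad is computed once in __init__, so sdd() always prints the value the field held at startup. A minimal sketch that reads the widget at click time instead:

def sdd(self):
    # Read the line edit when the button is clicked, not at startup;
    # fall back to 0.0 if the field does not contain a number.
    try:
        valor_entrada = float(self.lineEdit_valueEntry.text())
    except ValueError:
        valor_entrada = 0.0
    print(valor_entrada)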

Scrapy Splash - I am not able to get the value

I am trying to scrape this page: https://simple.ripley.com.pe/laptop-lenovo-ideapad-5-amd-ryzen-7-16gb-ram-256gb-ssd-14-2004286061746p?s=o
All OK, but I am not able to get the values at this XPath:
//*[@id="panel-Especificaciones"]/div/div/table/tbody/tr[19]/td[2]
I think it loads dynamically; it's a table with many rows inside, and I would like to get those values.
Image: the page section I can't scrape
This is my spider code:
import scrapy
from scrapy_splash import SplashRequest
from numpy import nan

LUA_SCRIPT = """
function main(splash)
    splash.private_mode_enabled = false
    splash:go(splash.args.url)
    splash:wait(2)
    html = splash:html()
    splash.private_mode_enabled = true
    return html
end
"""

class RipleySpider(scrapy.Spider):
    name = "ripley"

    def start_requests(self):
        url = 'https://simple.ripley.com.pe/tecnologia/computacion/laptops?facet%5B%5D=Procesador%3AIntel+Core+i7'
        yield SplashRequest(url=url, callback=self.parse)

    def parse(self, response):
        for link in response.xpath("//div[@class='catalog-container']/div/a/@href"):
            yield response.follow(link.get(), callback=self.parse_products)
        # for href in response.xpath("//ul[@class='pagination']/li[last()]/a/@href").getall():
        #     yield SplashRequest(response.urljoin(href), callback=self.parse)

    def parse_products(self, response):
        titulo = response.css("h1::text").get()
        link = response.request.url
        sku = response.css(".sku-value::text").get()
        precio = response.css(".product-price::text").getall()
        if len(precio) == 1:
            precio_normal = nan
            precio_internet = precio[0]
            precio_tarjeta_ripley = nan
        elif len(precio) == 2:
            precio_normal = precio[0]
            precio_internet = precio[1]
            precio_tarjeta_ripley = nan
        elif len(precio) == 4:
            precio_normal = precio[0]
            precio_internet = precio[1]
            precio_tarjeta_ripley = precio[-1]
        try:
            # descripcion = response.css(".product-short-description::text").get()
            descripcion = response.xpath('//*[@id="panel-Especificaciones"]/div/div/table/tbody/tr[1]/td[2]/text()').get()
        except:
            descripcion = 'sin valor'
        yield {
            'Título': titulo,
            'Link': link,
            'SKU': sku,
            'Precio Normal': precio_normal,
            'Precio Internet': precio_internet,
            'Precio Tarjeta Ripley': precio_tarjeta_ripley,
            'Descripción': descripcion,
        }
Please, what solutions does Scrapy offer? Thanks in advance for your help.
P.S.: I'm using Docker with Splash on localhost:8050, and settings.py is configured according to the documentation.
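One likely cause, assuming the specifications table is built by JavaScript: parse() follows product links with response.follow(), which issues plain Scrapy requests, so the product pages are never rendered by Splash. A hedged sketch that routes product pages through Splash too, reusing the question's LUA_SCRIPT via the execute endpoint:

def parse(self, response):
    for link in response.xpath("//div[@class='catalog-container']/div/a/@href").getall():
        # Render product pages with Splash so the JS-built table
        # exists in the HTML that parse_products receives.
        yield SplashRequest(
            url=response.urljoin(link),
            callback=self.parse_products,
            endpoint='execute',
            args={'lua_source': LUA_SCRIPT},
        )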

eval() arg 1 must be a string, bytes or code object Traceback (most recent call last)

So I am trying to deploy a machine learning model to Streamlit using Flask, but as the title says, I get the error 'eval() arg 1 must be a string'.
------This is The Code For The Back End------
from flask import Flask, request, jsonify
import pickle

app = Flask(__name__)

with open('forest_opt.pkl', 'rb') as model_file:
    model = pickle.load(model_file)

@app.route('/')
def model_prediction():
    age = eval(request.args.get('age'))
    internship = eval(request.args.get('internship'))
    cgpa = eval(request.args.get('cgpa'))
    hostel = eval(request.args.get('hostel'))
    history = eval(request.args.get('history'))
    new_data = [age, internship, cgpa, hostel, history]
    res = model.predict([new_data])
    classes = ['No', 'Yes']
    response = {'status': 'success',
                'code': 200,
                'data': {'result': classes[res[0]]}}  # index the list, not call it
    return jsonify(response)

@app.route('/predict', methods=['POST'])
def predict_post():
    content = request.json
    data = [content['age'],
            content['internship'],
            content['cgpa'],
            content['hostel'],
            content['history']]
    res = model.predict([data])
    response = {'status': 'success',
                'code': 200,
                'data': {'result': str(res[0])}}
    return jsonify(response)

app.run(debug=True)
------This is The Code For The Front End------
import streamlit as st
import requests

URL = 'http://127.0.0.1:5000/'

st.title('App for Detecting Chance of Getting a Job')

age = st.number_input('age')
internship = st.number_input('Internship (0,1,2,3)')
cgpa = st.number_input('cgpa')
hostel = st.number_input('hostel')
history = st.number_input('history')

data = {'age': age,
        'internship': internship,
        'cgpa': cgpa,
        'hostel': hostel,
        'history': history}

r = requests.post(URL, json=data)
res = r.json()
st.write(f"Predict The Result: {res['data']['result']}")
The error keeps pointing at the line age = eval(request.args.get('age')). I don't know why eval() receives nothing. Please help, I am kind of new to this. Thank you!
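The traceback points at model_prediction(), which runs on a plain GET of '/' with no query string: request.args.get('age') then returns None, and eval(None) raises exactly this TypeError. Meanwhile, the Streamlit front end sends a POST with a JSON body to '/', which that GET-only route cannot handle; the JSON is read by '/predict' instead. A minimal fix sketch on the front end, pointing it at the matching endpoint:

# Post to the endpoint that actually reads the JSON body
URL = 'http://127.0.0.1:5000/predict'
r = requests.post(URL, json=data)

As a side note, float(...) would be a safer choice than eval() for numeric query parameters.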

How to get rid of placements (SERVER or CLIENTS) so that I can transform float32@SERVER to float32?

I am trying to do the learning rate decay challenge of the Building Your Own Federated Learning Algorithm tutorial. I have used the following code:
import nest_asyncio
nest_asyncio.apply()

import collections
import attr
import functools
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff

np.random.seed(0)

emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()

NUM_CLIENTS = 10
BATCH_SIZE = 20
initial_lr = 0.01
decay_rate = 0.0005
minimum_lr = initial_lr/2

def preprocess(dataset):
    def batch_format_fn(element):
        return (tf.reshape(element['pixels'], [-1, 784]),
                tf.reshape(element['label'], [-1, 1]))
    return dataset.batch(BATCH_SIZE).map(batch_format_fn)

client_ids = np.random.choice(emnist_train.client_ids,
                              size=NUM_CLIENTS, replace=False)

federated_train_data = [preprocess(emnist_train.create_tf_dataset_for_client(x))
                        for x in client_ids]

def create_keras_model():
    return tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(784,)),
        tf.keras.layers.Dense(10, kernel_initializer='zeros'),
        tf.keras.layers.Softmax(),
    ])

def model_fn():
    keras_model = create_keras_model()
    return tff.learning.from_keras_model(
        keras_model,
        input_spec=federated_train_data[0].element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

@tf.function
def client_update(model, dataset, server_weights, client_optimizer):
    client_weights = model.trainable_variables
    tf.nest.map_structure(lambda x, y: x.assign(y),
                          client_weights, server_weights)
    for batch in dataset:
        with tf.GradientTape() as tape:
            outputs = model.forward_pass(batch)
        grads = tape.gradient(outputs.loss, client_weights)
        grads = tf.clip_by_global_norm(grads, 5.0)[0]
        grads_and_vars = zip(grads, client_weights)
        client_optimizer.apply_gradients(grads_and_vars)
    return client_weights

@tf.function
def server_update(model, mean_client_weights):
    model_weights = model.trainable_variables
    tf.nest.map_structure(lambda x, y: x.assign(y),
                          model_weights, mean_client_weights)
    return model_weights

@tff.tf_computation
def server_init():
    model = model_fn()
    return model.trainable_variables

@tff.federated_computation
def initialize_fn():
    return [tff.federated_value(server_init(), tff.SERVER),
            tff.federated_value(initial_lr, tff.SERVER)]
    # return tff.federated_value([server_init(), initial_lr], tff.SERVER)

whimsy_model = model_fn()
tf_dataset_type = tff.SequenceType(whimsy_model.input_spec)
str(tf_dataset_type)

model_weights_type = server_init.type_signature.result
str(model_weights_type)

@tff.tf_computation(tf_dataset_type, model_weights_type, tf.float32)
def client_update_fn(tf_dataset, server_weights, LR):
    model = model_fn()
    client_optimizer = tf.keras.optimizers.SGD(learning_rate=LR)
    return client_update(model, tf_dataset, server_weights, client_optimizer)

@tff.tf_computation(model_weights_type)
def server_update_fn(mean_client_weights):
    model = model_fn()
    return server_update(model, mean_client_weights)

federated_server_type = tff.FederatedType(model_weights_type, tff.SERVER)
federated_dataset_type = tff.FederatedType(tf_dataset_type, tff.CLIENTS)

# federated_server_type_with_LR = tff.FederatedType([model_weights_type, tff.to_type((tf.float32))], tff.SERVER)
federated_server_type_with_LR = [tff.FederatedType(model_weights_type, tff.SERVER),
                                 tff.FederatedType(tff.to_type((tf.float32)), tff.SERVER)]

@tf.function
def decay_lr(lr):
    if lr - decay_rate > minimum_lr:
        return lr - decay_rate
    else:
        return minimum_lr

@tff.tf_computation(tf.float32)
def decay_lr_fn(lr):
    return decay_lr(lr)

@tff.federated_computation(federated_server_type_with_LR, federated_dataset_type)
def next_fn(server_weights_and_LR, federated_dataset):
    server_weights = server_weights_and_LR[0]
    # LR_SERVER = server_weights_and_LR[1]
    # LR_CLIENTS = tff.federated_broadcast(server_weights_and_LR[1])
    LR = server_weights_and_LR[1]
    LR_NEW = tff.federated_map(decay_lr_fn, LR)
    LR_NEW_CLIENTS = tff.federated_broadcast(LR_NEW)

    # Broadcast the server weights to the clients
    server_weights_at_client = tff.federated_broadcast(server_weights)

    # Each client computes their updated weights
    client_weights = tff.federated_map(
        client_update_fn, (federated_dataset, server_weights_at_client, LR_NEW_CLIENTS))

    # The server averages the updates
    mean_client_weights = tff.federated_mean(client_weights)

    # The server update
    server_weights = tff.federated_map(server_update_fn, mean_client_weights)

    # return server_weights_and_LR
    return [server_weights, LR_NEW]

federated_algorithm = tff.templates.IterativeProcess(
    initialize_fn=initialize_fn,
    next_fn=next_fn)

sorted_client_ids = sorted(emnist_test.client_ids)
sorted_client_ids2 = sorted_client_ids[0:100]

def data(client, source=emnist_test):
    return preprocess(source.create_tf_dataset_for_client(client))

central_emnist_test = (tf.data.Dataset.from_tensor_slices(
    [data(client) for client in sorted_client_ids2])).flat_map(lambda x: x)

def evaluate(server_state):
    keras_model = create_keras_model()
    keras_model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
    )
    keras_model.set_weights(server_state)
    keras_model.evaluate(central_emnist_test)

server_state = federated_algorithm.initialize()
evaluate(server_state[0])

for round in range(15):
    print(round)
    # server_state_temp = federated_algorithm.next(server_state, federated_train_data)
    # server_state = [server_state_temp[0], decaying_lr(round)]
    server_state = federated_algorithm.next(server_state, federated_train_data)
    print(server_state[1])

evaluate(server_state[0])
This code works just fine, but I want to add the learning rate definition to the server_init() function. So basically I would have the following:
@tff.tf_computation
def server_init():
    model = model_fn()
    return [model.trainable_variables, initial_lr]

@tff.federated_computation
def initialize_fn():
    return tff.federated_value(server_init(), tff.SERVER)
But doing so leads to the following problem:
The return type of `initialize_fn` must be assignable to the first input argument of `next_fn`, but:
`initialize_fn` returned type:
<<float32[784,10],float32[10]>,float32>@SERVER
and the first input argument of `next_fn` is:
<server_weights_and_LR=<<float32[784,10],float32[10]>@SERVER,float32@SERVER>,federated_dataset={<float32[?,784],int32[?,1]>*}@CLIENTS>
The problem is that the return [server_weights, LR_NEW] at the end of next_fn() has type <<float32[784,10],float32[10]>@SERVER,float32@SERVER>: both server_weights and LR_NEW already carry the @SERVER placement. Currently
@tff.tf_computation
def server_init():
    model = model_fn()
    return model.trainable_variables

@tff.federated_computation
def initialize_fn():
    return [tff.federated_value(server_init(), tff.SERVER),
            tff.federated_value(initial_lr, tff.SERVER)]
also returns <<float32[784,10],float32[10]>@SERVER,float32@SERVER>.
But as I said, I want to change that part. To do so, I want to remove the placements of server_weights and LR_NEW in next_fn and instead apply a placement to the list containing both of them. How can I do that?
Also, does anyone have a "cleaner" solution to this challenge?
EDIT:
I just want to clarify that the input/output matching for initialize and next is "cyclic": the output of initialize must match the first input of next, but the output of next must also match that same input argument.
The first return argument of `next_fn` must be assignable to its first input argument, but found
`next_fn` which returns type:
<<float32[784,10],float32[10]>@SERVER,float32@SERVER>
which does not match its first input argument:
<<float32[784,10],float32[10]>,float32>@SERVER
The problem in your code is the manual creation of federated_server_type_with_LR.
In the TFF type system, <A@SERVER, B@SERVER> is different from <A, B>@SERVER. You can convert the former to the latter using tff.federated_zip(), which promotes the placement to the top level.
Two solutions:
(1) Modify the decorator of next_fn to be @tff.federated_computation(tff.federated_zip(federated_server_type_with_LR), federated_dataset_type)
(2) [preferred, to avoid this kind of issue] Do not create the type manually; read it from initialize_fn instead. The decorator would be @tff.federated_computation(initialize_fn.type_signature.result, federated_dataset_type)
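For illustration, a hedged sketch of how next_fn might line up with solution (2); the body reuses the question's definitions, and tff.federated_zip on the return value promotes <A@SERVER, B@SERVER> to <A, B>@SERVER so the output also matches the first input argument:

@tff.federated_computation(initialize_fn.type_signature.result,
                           federated_dataset_type)
def next_fn(server_weights_and_LR, federated_dataset):
    server_weights = server_weights_and_LR[0]
    LR = server_weights_and_LR[1]
    LR_NEW = tff.federated_map(decay_lr_fn, LR)
    server_weights_at_client = tff.federated_broadcast(server_weights)
    client_weights = tff.federated_map(
        client_update_fn,
        (federated_dataset, server_weights_at_client,
         tff.federated_broadcast(LR_NEW)))
    mean_client_weights = tff.federated_mean(client_weights)
    server_weights = tff.federated_map(server_update_fn, mean_client_weights)
    # federated_zip turns <A@SERVER, B@SERVER> into <A, B>@SERVER,
    # matching the type returned by initialize_fn.
    return tff.federated_zip([server_weights, LR_NEW])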

How can I construct function for client selection?

I am trying to customize how the clients' weights are averaged, by selecting some of the clients based on each client's sorted loss sum, following the code in this link.
def run_one_round(server_state, federated_dataset):
    server_message = tff.federated_map(server_message_fn, server_state)
    server_message_at_client = tff.federated_broadcast(server_message)
    client_outputs = tff.federated_map(
        client_update_fn, (federated_dataset, server_message_at_client))
    weight_denom = client_outputs.client_weight
    collected_output = tff.federated_collect(client_outputs)  # append
    round_model_delta = tff.federated_map(
        selecting_fn, (collected_output, weight_denom))  # append
    server_state = tff.federated_map(
        server_update_fn, (server_state, round_model_delta))
    round_loss_metric = tff.federated_mean(
        client_outputs.model_output, weight=weight_denom)
    return server_state, round_loss_metric  # append

@tff.tf_computation()  # append
def selecting_fn(collected_output, weight_denom):
    ...
    ...
    return round_model_delta
I'm trying to use tf.math.top_k for sorting and tf.compat.v1.metrics.mean for averaging, but it doesn't work (TypeError, ValueError, ...).
How can I construct selecting_fn, and how do I convert a tensor to a FederatedType?
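Not a complete answer, but a minimal sketch of the selection step as plain TensorFlow, assuming the collected client outputs have been stacked into tensors (both the tensor names and the shapes here are hypothetical):

import tensorflow as tf

def select_and_average(client_losses, client_deltas, k=5):
    # client_losses: [num_clients] summed loss per client (hypothetical)
    # client_deltas: [num_clients, ...] model delta per client (hypothetical)
    # Keep the k clients with the smallest loss sums: top_k on the
    # negated losses yields the indices of the smallest values.
    _, idx = tf.math.top_k(-client_losses, k=k)
    selected = tf.gather(client_deltas, idx)
    # Average only the selected clients' deltas.
    return tf.reduce_mean(selected, axis=0)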
