How to run a custom Slider - drake

I'm working on a simulation of a soft robot using the Piecewise Constant Curvature (PCC) assumption, representing each PCC segment with an Augmented Rigid Body Model (ARBM). As a first step I would like to implement manual sliders for curvature control, i.e., sliders where I can input the two curvature parameters (Theta and Phi), map them to the ARBM via some known map m(Theta, Phi), and showcase the resulting robot in MeshCat.
I'm already displaying the ARBM, but I'm struggling to get the sliders to run. What I'm after is something similar to the "Let's get you a Robot" notebook from the Manipulation course: ideally a kinematic simulation in which I can set the curvatures and see the resulting ARBM configuration.
As of right now, my approach is the following:
Create a custom slider class based on LeafSystem that creates two sliders
Create a transformation system based on a VectorSystem that applies the map m(Theta, Phi) to the slider output, yielding the ARBM joint states on its output
Set the robot joints to this state (this is where I'm struggling)
The problem seems to be that I cannot connect the desired output directly to the joint positions in the same way that, e.g., the JointSliders are connected. Is there a way to do this, or should I follow a different approach?
See below for the code of the individual instances:
CurvatureSlider.py:
from dataclasses import dataclass
import numpy as np
from pydrake.systems.framework import LeafSystem


class CurvatureSliders(LeafSystem):

    @dataclass
    class SliderDefault:
        """Default values for the meshcat sliders."""
        name: str
        """The name that is used to add / query values from."""
        default: float
        """The initial value of the slider."""

    _THETA = SliderDefault("Theta", 0.0)
    _PHI = SliderDefault("Phi", 0.0)

    def __init__(self, meshcat):
        """
        @param meshcat The already created pydrake.geometry.Meshcat instance.
        """
        LeafSystem.__init__(self)
        self.DeclareVectorOutputPort("theta_phi", 2, self.DoCalcOutput)
        self.meshcat = meshcat

        # Curvature Control Sliders
        self.meshcat.AddSlider(
            name=self._THETA.name, min=-2.0 * np.pi,
            max=2.0 * np.pi, step=0.01,
            value=self._THETA.default)
        self.meshcat.AddSlider(
            name=self._PHI.name, min=-2.0 * np.pi,
            max=2.0 * np.pi, step=0.01,
            value=self._PHI.default)

    def SetConfiguration(self, config: tuple):
        """
        @param config Tuple of floats that is ordered (Theta, Phi)
        """
        assert len(config) == 2
        self.meshcat.SetSliderValue(self._THETA.name, config[0])
        self.meshcat.SetSliderValue(self._PHI.name, config[1])

    def DoCalcOutput(self, context, output):
        theta = self.meshcat.GetSliderValue(self._THETA.name)
        phi = self.meshcat.GetSliderValue(self._PHI.name)
        output.SetAtIndex(0, theta)
        output.SetAtIndex(1, phi)
CC2ARBM.py:
import numpy as np
from pydrake.systems.framework import VectorSystem


class CC2ARBM(VectorSystem):

    def __init__(self, L: float):
        """
        @param L The length of the segment.
        """
        VectorSystem.__init__(self, 2, 10)
        # Define length of the segment
        self._L = L

    def DoCalcVectorOutput(self, context, u, x, y):
        # Extract input
        theta = u[0]
        phi = u[1]

        # Compute ARBM equivalent configuration
        b = 0.5 * self._L
        eta = 0
        if not theta == 0:
            b = self._L / theta * np.sqrt(1
                + 4 * np.sin(0.5 * theta) / theta
                * (np.sin(0.5 * theta) / theta)
                - np.cos(0.5 * theta))
            eta = np.arccos(1 / b * self._L / theta * np.sin(0.5 * theta))

        print("Computed ARBM Joint Position")

        # Aggregate output
        y.SetAtIndex(0, phi)
        y.SetAtIndex(1, 0.5 * theta - eta)
        y.SetAtIndex(2, b)
        y.SetAtIndex(3, eta)
        y.SetAtIndex(4, -phi)
        y.SetAtIndex(5, phi)
        y.SetAtIndex(6, eta)
        y.SetAtIndex(7, b)
        y.SetAtIndex(8, 0.5 * theta - eta)
        y.SetAtIndex(9, -phi)
Main.py:
import sys
import time
import matplotlib.pyplot as plt
import numpy as np

from CurvatureSliders import CurvatureSliders
from CC2ARBM import CC2ARBM

from pydrake.geometry import (
    MeshcatVisualizerCpp,
    MeshcatVisualizerParams,
    Role,
    StartMeshcat
)
from pydrake.math import (
    RigidTransform,
    RotationMatrix
)
from pydrake.multibody.meshcat import JointSliders
from pydrake.multibody.tree import FixedOffsetFrame
from pydrake.multibody.parsing import Parser
from pydrake.multibody.plant import AddMultibodyPlantSceneGraph
from pydrake.systems.analysis import Simulator
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.primitives import LogVectorOutput


def do_main(teleop: bool,
            target_realtime_rate: float,
            simulation_time: float,
            max_time_step: float,
            description_path: str) -> None:

    # Start the visualizer.
    # The cell will output an HTTP link after the execution.
    # Click the link and a MeshCat tab should appear in your browser.
    meshcat = StartMeshcat()

    # Reset the Meshcat simulator
    meshcat.Delete()
    meshcat.DeleteAddedControls()

    # Init the diagram builder
    builder = DiagramBuilder()

    # Note: the time_step here is chosen arbitrarily.
    plant, scene_graph = AddMultibodyPlantSceneGraph(
        builder, time_step=max_time_step)

    # Load the files into the plant/scene_graph.
    parser = Parser(plant)
    # L - Mount
    mount = parser.AddModelFromFile("../mount.sdf")
    # Robot
    model = parser.AddModelFromFile(description_path)

    # Create an offset frame located half the link height above the origin
    base_frame = plant.GetFrameByName("mount_base")
    offset_frame = plant.AddFrame(
        frame=FixedOffsetFrame(
            name="offset_frame",
            P=plant.world_frame(),
            X_PF=RigidTransform(
                R=RotationMatrix.Identity(),
                p=np.array([0, 0, 0.5])
            ),
            model_instance=None)
    )
    # Weld the base link to the offset frame
    plant.WeldFrames(offset_frame, base_frame)

    # Weld the robot base to the L-mount
    robot_base_frame = plant.GetFrameByName("robot_base")
    robot_mounting_frame = plant.GetFrameByName("robot_location")
    plant.WeldFrames(robot_mounting_frame, robot_base_frame)

    # Finalize the plant
    plant.Finalize()

    #############################################################
    #              Post Plant Finalization Code                 #
    #############################################################

    # Set the default states of the joints
    for i in plant.GetJointIndices(model):
        j = plant.get_joint(i)
        if j.num_positions() == 1:
            j.set_default_positions([-0.2])

    # Make the control inputs of the model an input to the diagram.
    builder.ExportInput(plant.get_actuation_input_port())

    # Add two visualizers, one to publish the "visual" geometry, and one to
    # publish the "collision" geometry.
    visual = MeshcatVisualizerCpp.AddToBuilder(
        builder, scene_graph, meshcat,
        MeshcatVisualizerParams(
            role=Role.kPerception, prefix="visual")
    )
    collision = MeshcatVisualizerCpp.AddToBuilder(
        builder, scene_graph, meshcat,
        MeshcatVisualizerParams(role=Role.kProximity, prefix="collision"))
    # Disable the collision geometry at the start; it can be enabled by the
    # checkbox in the meshcat controls.
    meshcat.SetProperty("collision", "visible", False)

    # Setup loggers
    plant_logger = LogVectorOutput(plant.get_state_output_port(), builder)

    # Joint sliders work like this:
    # sliders = builder.AddSystem(JointSliders(meshcat, plant))

    # Add curvature sliders
    curv_sliders = builder.AddSystem(CurvatureSliders(meshcat))
    cc2arbm = builder.AddSystem(CC2ARBM(0.2))

    # Connect the sliders to the transformation block
    builder.Connect(curv_sliders.get_output_port(0),
                    cc2arbm.get_input_port(0))

    # Build the diagram
    diagram = builder.Build()

    # Start running the teleoperation.
    # Start running the slider similar to the joint sliders,
    # e.g. sliders.Run(diagram)


if __name__ == '__main__':
    if len(sys.argv) <= 1:
        do_main(teleop=True,
                target_realtime_rate=1,
                simulation_time=10.0,
                max_time_step=1.0e-3,
                description_path="single_cc_segment.sdf")
    else:
        do_main(teleop=True,
                target_realtime_rate=float(sys.argv[1]),
                simulation_time=float(sys.argv[2]),
                max_time_step=float(sys.argv[3]),
                description_path=sys.argv[4])

There are a few moving parts here. First, you say "kinematic simulation", but the demonstration in "Let's get you a robot" does not simulate (physics); it only visualizes the kinematics as set by the sliders. Assuming that is sufficient for your goal, you could pass a callback into your sliders.Run method (as I do in the notebook corresponding to this chapter), and I believe that if you call plant.SetPositions in that callback, it should work.
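A minimal sketch of that callback idea (every name here is illustrative, not part of the question's code; the mapping callback would wrap the m(Theta, Phi) logic that CC2ARBM implements above):

import time

def run_teleop(sliders, plant, diagram, map_to_joints, timeout_s=1e5):
    # Contexts for reading the sliders and writing the plant positions.
    root_context = diagram.CreateDefaultContext()
    sliders_context = sliders.GetMyContextFromRoot(root_context)
    plant_context = plant.GetMyMutableContextFromRoot(root_context)
    t0 = time.time()
    while time.time() - t0 < timeout_s:
        theta_phi = sliders.get_output_port().Eval(sliders_context)
        q = map_to_joints(theta_phi)   # user-supplied callback, e.g. m(Theta, Phi)
        plant.SetPositions(plant_context, q)
        diagram.Publish(root_context)  # re-render in MeshCat
        time.sleep(0.05)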

I eventually ran the custom slider using a reimplementation of JointSliders.Run(...). For me, the following was sufficient (two planar constant curvature segments, each represented by 4 rigid joints):
from dataclasses import dataclass
import numpy as np
import functools
import operator
import logging
import time
from typing import List, Tuple
from pydrake.systems.framework import LeafSystem


class CurvatureSliders(LeafSystem):

    @dataclass
    class SliderDefault:
        """Default values for the meshcat sliders."""
        name: str
        """The name that is used to add / query values from."""
        default: float
        """The initial value of the slider."""

    _Q1 = SliderDefault("q1", 0.0)
    _Q2 = SliderDefault("q2", 0.0)

    def __init__(self, meshcat, plant, L=0.2):
        """
        @param meshcat The already created pydrake.geometry.Meshcat instance.
        @param plant The plant the sliders are connected to.
        @param L The rest length of the segment.
        """
        # Call super constructor
        LeafSystem.__init__(self)
        # Declare system output
        output = self.DeclareVectorOutputPort(
            "q1_q2", 2, self.DoCalcOutput)
        output.disable_caching_by_default()
        # Init class variables
        self._meshcat = meshcat
        self._plant = plant
        self._L = L
        # Curvature control sliders
        self._meshcat.AddSlider(
            name=self._Q1.name, min=-2.0 * np.pi,
            max=2.0 * np.pi, step=0.1,
            value=self._Q1.default)
        self._meshcat.AddSlider(
            name=self._Q2.name, min=-2.0 * np.pi,
            max=2.0 * np.pi, step=0.1,
            value=self._Q2.default)

    def SetConfiguration(self, q: Tuple):
        """
        @param q Configuration of the CC segments, described by (q1, q2).
        """
        self._meshcat.SetSliderValue(self._Q1.name, q[0])
        self._meshcat.SetSliderValue(self._Q2.name, q[1])

    def CC2AGRB(self, q: Tuple) -> List[float]:
        # Extract input
        q1 = q[0]
        q2 = q[1]
        # Compute ARBM equivalent configuration
        config1 = [0, 0.5 * self._L, 0.5 * self._L, 0]
        config2 = [0, 0.5 * self._L, 0.5 * self._L, 0]
        if q1 != 0:
            config1 = [
                0.5 * q1,
                self._L * np.sin(0.5 * q1) / q1,
                self._L * np.sin(0.5 * q1) / q1,
                0.5 * q1
            ]
        if q2 != 0:
            config2 = [
                0.5 * q2,
                self._L * np.sin(0.5 * q2) / q2,
                self._L * np.sin(0.5 * q2) / q2,
                0.5 * q2
            ]
        return functools.reduce(operator.iconcat, [config1, config2], [])

    def DoCalcOutput(self, context, output):
        q1 = self._meshcat.GetSliderValue(self._Q1.name)
        q2 = self._meshcat.GetSliderValue(self._Q2.name)
        output.SetAtIndex(0, q1)
        output.SetAtIndex(1, q2)

    def Run(self, diagram, timeout=1e5):
        # Get all the contexts
        root_context = diagram.CreateDefaultContext()
        sliders_context = self.GetMyContextFromRoot(root_context)
        plant_context = self._plant.GetMyMutableContextFromRoot(root_context)
        # Add a stop button
        kButtonName = "Stop Curvature Sliders"
        logging.info("Press the '%s' button in Meshcat to continue.",
                     kButtonName)
        self._meshcat.AddButton(kButtonName)
        # Grab the current time to implement the timeout
        t0 = time.time()
        # Loop until the button is clicked, or
        # the timeout (when given) is reached.
        diagram.Publish(root_context)
        while self._meshcat.GetButtonClicks(kButtonName) < 1:
            # Break out of the loop if the timeout elapsed
            elapsed_t = time.time() - t0
            if elapsed_t >= timeout:
                break
            # If the sliders have not changed, avoid invalidating the context.
            old_positions = self._plant.GetPositions(plant_context)
            new_positions = self.CC2AGRB(
                self.get_output_port().Eval(sliders_context))
            if (np.abs(new_positions - old_positions) < 0.001).all():
                time.sleep(0.01)
                continue
            # Publish the new positions.
            self._plant.SetPositions(plant_context, new_positions)
            diagram.Publish(root_context)
Then just add the system using the builder and call its Run() function.
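For instance, a minimal sketch reusing the builder, plant and meshcat objects from the question's Main.py:

# Sketch: add the slider system, build the diagram, then hand control
# to the Run() loop defined above.
sliders = builder.AddSystem(CurvatureSliders(meshcat, plant, L=0.2))
diagram = builder.Build()
sliders.Run(diagram)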

Related

my LSTM time-series forecast predictions have a shift to the future

I am trying to make predictions on univariate time-series data using TensorFlow. However, when I look at the results, the predictions have a time shift (please see the figure). Any ideas what the problem is?
Below is the function I use to prepare the inputs and outputs (X and y), as well as to normalize the data:
# define a function to prepare the X and y for training the model
def df_to_X_y_group(df_as_np, window_size, num):
    # input is a numpy array; num is the number of wavelengths in a group
    X = []  # make an empty list
    y = []
    # (start, stop, step): a window over the features of size num without any overlap
    for j in range(0, df_as_np.shape[1] - num, num):
        for i in range(len(df_as_np) - window_size):  # there is overlap between windows in time
            row = [a for a in df_as_np[i:i + window_size, j:j + num]]
            X.append(row)
            label = [r for r in df_as_np[i + window_size, j:j + num]]
            y.append(label)
    return np.array(X), np.array(y)

# convert the data to X and y format
window_size_group = 50
output_group = 2
num = 2
X_group, y_group = df_to_X_y_group(df_as_np, window_size_group, num)
X_group.shape, y_group.shape

# Split the data into train, val and test: 70%, 20% and 10% of the data
X_train_group, y_train_group = X_group[:int(.7 * X_group.shape[0])], y_group[:int(.7 * X_group.shape[0])]
X_val_group, y_val_group = X_group[int(.7 * X_group.shape[0]):int(.9 * X_group.shape[0])], y_group[int(.7 * X_group.shape[0]):int(.9 * X_group.shape[0])]
X_test_group, y_test_group = X_group[int(.9 * X_group.shape[0]):], y_group[int(.9 * X_group.shape[0]):]

# Normalize the data
X_train_mean_group = np.mean(X_train_group)
X_train_std_group = np.std(X_train_group)

def preprocess_group(X):
    X_norm = (X - X_train_mean_group) / X_train_std_group
    return X_norm

# Convert the data back to the scale before normalization
def postprocess_group(pred):
    actual = (pred * X_train_std_group) + X_train_mean_group
    return actual

# apply the preprocessing to the inputs and outputs
X_train_norm_group = preprocess_group(X_train_group)
X_val_norm_group = preprocess_group(X_val_group)
X_test_norm_group = preprocess_group(X_test_group)
y_train_norm_group = preprocess_group(y_train_group)
y_val_norm_group = preprocess_group(y_val_group)
y_test_norm_group = preprocess_group(y_test_group)
X_train_group.shape, y_train_group.shape, X_val_group.shape, y_val_group.shape, X_test_group.shape, y_test_group.shape
Here is the model:
# Build the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import ModelCheckpoint  # to save the model
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.losses import Huber
from tensorflow.keras.metrics import RootMeanSquaredError
from tensorflow.keras.optimizers import Adam

model_group = Sequential()
# input shape: windows of window_size time steps, each with num features
model_group.add(InputLayer((X_group.shape[1], X_group.shape[2])))
model_group.add(LSTM(128))
model_group.add(Dense(20, 'relu'))
model_group.add(Dense(output_group))
model_group.summary()

# Train the model
cp2 = ModelCheckpoint('model/group/overlapnum2', save_best_only=True)  # saves the best model based on validation error
model_group.compile(loss=MeanSquaredError(), optimizer=Adam(learning_rate=0.0001), metrics=[RootMeanSquaredError()])
model_group.fit(X_train_norm_group, y_train_norm_group, validation_data=(X_val_norm_group, y_val_norm_group), epochs=15, callbacks=[cp2])
Below is the function to predict and plot the data:
# We make a function which predicts and plots
from sklearn.metrics import mean_squared_error as mse

def plot_predictions(model, X, y, start=0, end=100):
    predictions = model.predict(X[start:end])
    predictions_actual = postprocess_group(predictions)
    y_actual = postprocess_group(y[start:end])
    plt.plot(predictions_actual, c='r', label='Predictions')
    plt.plot(y_actual, label='Actuals')
    plt.xlabel("Order of the data (Time)")
    plt.ylabel("Number of Photons")
    plt.legend()  # so the labels show up
    return mse(y_actual, predictions_actual)

# plot
plot_predictions(model_group, X_test_norm_group, y_test_norm_group, start=0, end=300)
Here are the prediction results, which are shifted in time (to match the true results, y(t) would have to be replaced with y(t+1)):
(figure omitted: predicted vs. actual series showing the time shift)
I have tried tuning several hyperparameters such as the batch size and window size, and also adding layers, but none of these helped.

In ROS turtlesim how to move turtle in sine curve

In ROS turtlesim, how can we move the turtle along a sine path? I know we need to use a proportional controller to achieve this, but I can't work out the actual method. I have attached the code I have tried so far.
Note: in the callback function I convert the turtle's orientation from the -pi to pi range used by ROS to a 0 to 2pi range.
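For reference, that conversion is simply a wrap into the [0, 2*pi) range; a one-line equivalent of the callback's arithmetic would be:

# Equivalent of pose_callback below: map ROS's [-pi, pi] angle into [0, 2*pi).
theta = pose.theta % (2 * math.pi)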
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
import math

PI = 3.1415926535897

# Initial value of theta is 0
theta = 0


# Subscriber callback function
def pose_callback(pose):
    global theta
    req = 2 * math.pi
    if pose.theta < 0:
        alpha = req - (pose.theta + (2 * math.pi))
    else:
        alpha = req - pose.theta
    alpha = 2 * math.pi - alpha
    theta = alpha


# sin_graph function
def sin_graph():
    # Starts a new node
    global theta
    rospy.init_node('sin_graph', anonymous=True)
    # Initialization of the publisher
    velocity_publisher = rospy.Publisher(
        '/turtle1/cmd_vel', Twist, queue_size=10)
    # Subscribing to topic Pose
    rospy.Subscriber("/turtle1/pose", Pose, pose_callback)
    vel_msg = Twist()
    # Initializing basic data
    speed = 0.2
    radius = 1
    vel_msg.linear.x = speed
    vel_msg.linear.y = 0
    vel_msg.linear.z = 0
    vel_msg.angular.x = 0
    vel_msg.angular.y = 0
    vel_msg.angular.z = speed / radius
    # Rate at which messages are published (10 times per second)
    rate = rospy.Rate(10)
    # Loop until the node is shut down
    while not rospy.is_shutdown():
        vel_msg.linear.x = speed * math.cos(theta)
        vel_msg.angular.z = math.sin(theta)
        velocity_publisher.publish(vel_msg)
        rospy.loginfo("Moving in a sine curve")
        print(theta)
        rate.sleep()
    # Forcing our robot to stop
    print("Goal Reached")
    vel_msg.linear.x = 0
    vel_msg.angular.z = 0
    velocity_publisher.publish(vel_msg)
    rospy.spin()


if __name__ == '__main__':
    try:
        # Testing our function
        sin_graph()
    except rospy.ROSInterruptException:
        pass

How to publish odom (nav_msgs/Odometry) from MD49 encoders outputs?

I am using the MD49 Motor Drive with its motors:
https://www.robot-electronics.co.uk/htm/md49tech.htm
http://wiki.ros.org/md49_base_controller
How do I subscribe to (encoder_l and encoder_r) from the md49_base_controller package and publish (vx, vy, and vth) as an odom message (nav_msgs/Odometry)?
There are two problems:
1- The first is that the encoder outputs are not correct, so the package needs to be modified.
2- The second is that I want to create a package that subscribes to the right and left wheel encoder counts (encoder_l and encoder_r) and publishes (vx, vy, and vth) as an odom message (nav_msgs/Odometry), to be compatible with the MPU9250 IMU and
http://wiki.ros.org/robot_pose_ekf
The proposed package is:
1- We have to convert (encoder_l and encoder_r) to (RPM_l and RPM_r) as follows:
if (speed_l>128){newposition1 = encoder_l;}
else if (speed_l<128){ newposition1 = 0xFFFFFFFF-encoder_l;}
else if (speed_l==128) {newposition1=0;}
newtime1 = millis();
RPM_l = ((newposition1-oldposition1)*1000*60)/((newtime1-oldtime1)*980);
oldposition1 = newposition1;
oldtime1 = newtime1;
delay(250);
if (speed_r>128){ newposition2 = encoder_r;}
else if (speed_r<128){ newposition2 = 0xFFFFFFFF-encoder_r;}
else if (speed_r==128) { newposition2=0;}
newtime2 = millis();
RPM_r = ((newposition2-oldposition2)*1000*60)/((newtime2-oldtime2)*980);
oldposition2 = newposition2;
oldtime2= newtime2;
delay(250);
2- We have to convert (RPM_l and RPM_r) to (vx, vy, and vth) as follows:
vx = (r/2)*RPM_l*math.cos(th) + (r/2)*RPM_r*math.cos(th);
vy = (r/2)*RPM_l*math.sin(th) + (r/2)*RPM_r*math.sin(th);
vth = (r/B)*omega_l - (r/B)*omega_r;
Hint: r and B are wheel radius and vehicle width respectively.
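Note that vth above uses angular rates (omega_l, omega_r) in rad/s rather than RPM; a small helper for that conversion (a sketch, with a usage hint in the comment) would be:

import math

def rpm_to_rad_per_s(rpm):
    # one revolution is 2*pi radians, one minute is 60 seconds
    return rpm * 2.0 * math.pi / 60.0

# e.g. omega_l = rpm_to_rad_per_s(RPM_l); omega_r = rpm_to_rad_per_s(RPM_r)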
3- The odom (nav_msgs/Odometry) package is:
#!/usr/bin/env python
import math
from math import sin, cos, pi

import rospy
import tf
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
from md49_messages.msg import md49_encoders

rospy.init_node('odometry_publisher')

odom_pub = rospy.Publisher("odom", Odometry, queue_size=50)
odom_broadcaster = tf.TransformBroadcaster()

x = 0.0
y = 0.0
th = 0.0

vx = 0.1
vy = -0.1
vth = 0.1

current_time = rospy.Time.now()
last_time = rospy.Time.now()

r = rospy.Rate(1.0)
while not rospy.is_shutdown():
    current_time = rospy.Time.now()

    # compute odometry in a typical way given the velocities of the robot
    dt = (current_time - last_time).to_sec()
    delta_x = (vx * cos(th) - vy * sin(th)) * dt
    delta_y = (vx * sin(th) + vy * cos(th)) * dt
    delta_th = vth * dt

    x += delta_x
    y += delta_y
    th += delta_th

    # since all odometry is 6DOF we'll need a quaternion created from yaw
    odom_quat = tf.transformations.quaternion_from_euler(0, 0, th)

    # first, we'll publish the transform over tf
    odom_broadcaster.sendTransform(
        (x, y, 0.),
        odom_quat,
        current_time,
        "base_link",
        "odom"
    )

    # next, we'll publish the odometry message over ROS
    odom = Odometry()
    odom.header.stamp = current_time
    odom.header.frame_id = "odom"

    # set the position
    odom.pose.pose = Pose(Point(x, y, 0.), Quaternion(*odom_quat))

    # set the velocity
    odom.child_frame_id = "base_link"
    odom.twist.twist = Twist(Vector3(vx, vy, 0), Vector3(0, 0, vth))

    # publish the message
    odom_pub.publish(odom)

    last_time = current_time
    r.sleep()
The problem was in the serial communication setup, not in the code.
Setting up the UART on the Raspberry Pi 3 GPIO:
For some strange reason the default for the Pi 3 using the latest 4.4.9 kernel is to DISABLE the UART. To enable it you need to set enable_uart=1 in /boot/config.txt. (It is no longer necessary to add core_freq=250 to fix the core frequency and get a stable baud rate.)
If you don't use Bluetooth (or have undemanding uses) it is possible to swap the ports back in the Device Tree. There are pi3-miniuart-bt and pi3-disable-bt overlays, which are described in /boot/overlays/README.
As mentioned, by default the new GPIO UART is not enabled so the first thing to do is to edit the config.txt file to enable the serial UART:
sudo nano /boot/config.txt
and add the line (at the bottom):
enable_uart=1
You have to reboot for the changes to take effect.
To check where your serial ports are pointing you can use the list command with the long “-l” listing format:
ls -l /dev
In the long listing you will find:
serial0 -> ttyS0
serial1 -> ttyAMA0
Thus on a Raspberry Pi 3 and Raspberry Pi Zero W, serial0 will point to GPIO J8 pins 8 and 10 and use /dev/ttyS0. On other Raspberry Pis it will point to /dev/ttyAMA0.
So where possible refer to the serial port via its alias of "serial0" and your code should work on both the Raspberry Pi 3 and other Raspberry Pis.
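For example, with pyserial you could open the port by its alias (a sketch; the baud rate here is an assumption and must match whatever rate your MD49 is configured for):

import serial

# Open the UART via its stable alias so the same code works on the Pi 3
# and on older boards. The baud rate is an assumption; match it to the MD49.
port = serial.Serial('/dev/serial0', baudrate=38400, timeout=0.5)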
You also need to remove the console from the cmdline.txt. If you edit this with:
sudo nano /boot/cmdline.txt
you will see something like this:
dwc_otg.lpm_enable=0 console=serial0,115200 console=tty1 root=/dev/mmcblk0p2 rootfstype=ext4 elevator=deadline fsck.repair=yes rootwait
remove the part console=serial0,115200, then save and reboot for the changes to take effect:
reboot
First off, you need to import nav_msgs/Odometry as follows:
from nav_msgs.msg import Odometry
You need a callback function that performs those conversions, and then pass it to rospy.Subscriber, like this:
def example(data):
    data.vx = <conversion>
    data.vth = <conversion>

def listener():
    rospy.Subscriber('*topic*', Odometry, example)
    rospy.spin()

if __name__ == '__main__':
    listener()
I think this would work
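Putting the pieces above together, a rough sketch of such a node could look like the following (the md49_encoders topic name, the message field names, the 980 counts-per-revolution figure and the wheel geometry are all assumptions carried over from the snippets above; adjust them to your hardware):

#!/usr/bin/env python
# Sketch only: subscribe to the MD49 encoder counts and republish
# nav_msgs/Odometry computed with differential-drive kinematics.
import math
import rospy
import tf
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
from md49_messages.msg import md49_encoders

r = 0.05          # wheel radius [m] (assumption)
B = 0.30          # wheel separation [m] (assumption)
x = y = th = 0.0
last = None       # (encoder_l, encoder_r, stamp)

def on_encoders(msg):
    global x, y, th, last
    now = rospy.Time.now()
    if last is not None:
        dt = (now - last[2]).to_sec()
        if dt > 0:
            # counts -> wheel angular velocity [rad/s], 980 counts per rev (assumption)
            wl = (msg.encoder_l - last[0]) * 2.0 * math.pi / (980.0 * dt)
            wr = (msg.encoder_r - last[1]) * 2.0 * math.pi / (980.0 * dt)
            v = r * (wl + wr) / 2.0
            vth = r * (wr - wl) / B
            th += vth * dt
            x += v * math.cos(th) * dt
            y += v * math.sin(th) * dt
            odom = Odometry()
            odom.header.stamp = now
            odom.header.frame_id = "odom"
            odom.child_frame_id = "base_link"
            q = tf.transformations.quaternion_from_euler(0, 0, th)
            odom.pose.pose = Pose(Point(x, y, 0.0), Quaternion(*q))
            odom.twist.twist = Twist(Vector3(v, 0, 0), Vector3(0, 0, vth))
            pub.publish(odom)
    last = (msg.encoder_l, msg.encoder_r, now)

rospy.init_node('md49_odometry')
pub = rospy.Publisher('odom', Odometry, queue_size=50)
rospy.Subscriber('md49_encoders', md49_encoders, on_encoders)
rospy.spin()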

How to count vehicles using opencv in python?

I am working on a VCS (vehicle counting system) project. The scope of the project is to classify and count vehicles. I have built a custom model using Faster R-CNN in the TensorFlow Object Detection API. The model contains 7 classes, such as car, motorbike, bicycle, etc. The model works perfectly, but the problem is COUNTING: it is very hard to count vehicles in the video frames. I did some research on the internet and tried a lot, but I could not find any useful information. There are some projects on GitHub that use tracking methods.
What I want is the following: draw a horizontal line in the frame, and when a vehicle touches it, the count should be incremented. How do I do that? I don't know the algorithm behind it. I heard that centroid tracking would help me.
So my question is: how do I count vehicles when they touch the horizontal line? I have linked a sample image below.
Sample_Image
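For what it's worth, the core of the line-touch counting idea is just to take each detection's centroid and count it when that centroid falls inside a small band around the line. A minimal sketch (the box format, line_y, offset and detections_in_frame are placeholders, not part of the code below):

# Sketch of centroid-vs-line counting; box is (x, y, w, h) in pixels.
def crosses_line(box, line_y, offset=6):
    x, y, w, h = box
    cy = y + h // 2              # vertical centre of the detection
    return line_y - offset < cy < line_y + offset

count = 0
for box in detections_in_frame:  # e.g. boxes produced by the detector below
    if crosses_line(box, line_y=480):
        count += 1

On its own this double-counts a vehicle that stays inside the band over several frames, which is why the tracker-based snippet at the end only counts a tracked centroid once and then removes it from the list.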
import os
import cv2
import numpy as np
import tensorflow as tf
import sys

# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")

# Import utilities
from utils import label_map_util
from utils import visualization_utils as vis_util

# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
VIDEO_NAME = 'Video_105.mp4'

# Grab path to current working directory
CWD_PATH = os.getcwd()

# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')

# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH, 'training', 'labelmap.pbtxt')

# Path to video
PATH_TO_VIDEO = os.path.join(CWD_PATH, VIDEO_NAME)

# Number of classes the object detector can identify
NUM_CLASSES = 7

# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    sess = tf.Session(graph=detection_graph)

# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')

# Open video file
video = cv2.VideoCapture(PATH_TO_VIDEO)

while video.isOpened():
    # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
    # i.e. a single-column array, where each item in the column has the pixel RGB value
    ret, frame = video.read()
    frame_expanded = np.expand_dims(frame, axis=0)

    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: frame_expanded})

    # Draw the results of the detection (aka 'visualize the results')
    vis_util.visualize_boxes_and_labels_on_image_array(
        frame,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8,
        min_score_thresh=0.90)

    # All the results have been drawn on the frame, so it's time to display it.
    final_score = np.squeeze(scores)
    count = 0
    cv2.line(frame, (1144, 568), (1723, 664), (0, 0, 255), 2)  # Line
    for i in range(100):
        if scores is None or final_score[i] > 0.90:
            min_score_thresh = 0.90
            bboxes = boxes[scores > min_score_thresh]
            im_width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
            im_height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
            final_box = []
            for box in bboxes:
                ymin, xmin, ymax, xmax = box
                print("Ymin:{}:Xmin:{}:Ymax:{}Xmax{}".format(ymin * im_width, xmin * im_width, ymax * im_width, xmax * im_width))
                final_box.append([xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height])
            # print(final_box)

    cv2.imshow('Object detector', frame)

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break

# Clean up
video.release()
cv2.destroyAllWindows()
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2

tracker = cv2.TrackerCSRT_create()
vs = cv2.VideoCapture("Video.mp4")
initBB = None
detec = []

def pega_centro(x, y, w, h):
    x1 = int(w / 2)
    y1 = int(h / 2)
    cx = x + x1
    cy = y + y1
    return cx, cy

roi = 480
counter = 0
offset = 6

# loop over frames from the video stream
while vs.isOpened():
    ret, frame = vs.read()
    cv2.line(frame, (769, roi), (1298, roi), (255, 0, 0), 3)
    # check to see if we are currently tracking an object
    if initBB is not None:
        # grab the new bounding box coordinates of the object
        (success, box) = tracker.update(frame)
        # check to see if the tracking was a success
        if success:
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h),
                          (0, 255, 0), 2)
            cX = int((x + x + w) / 2.0)
            cY = int((y + y + h) / 2.0)
            cv2.circle(frame, (cX, cY), 3, (0, 0, 255), -1)
            c = pega_centro(x, y, w, h)
            detec.append(c)

    for (x, y) in detec:
        if y < (roi + offset) and y > (roi - offset):
            counter += 1
            print(counter)
            cv2.line(frame, (769, roi), (1298, roi), (0, 0, 255), 3)
            detec.remove((x, y))

    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    if key == ord("s"):
        # select the bounding box of the object we want to track (make
        # sure you press ENTER or SPACE after selecting the ROI)
        initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                               showCrosshair=True)
        # start OpenCV object tracker using the supplied bounding box
        # coordinates, then start the FPS throughput estimator as well
        tracker.init(frame, initBB)
        fps = FPS().start()
    # if the `q` key was pressed, break from the loop
    elif key == ord("q"):
        break
else:
    vs.release()
    cv2.destroyAllWindows()

Unable to write gradient step in theano for rnn

I have the following code, in which I convert words to one-hot vectors and do gradient descent in Theano using an RNN to predict the next word given a sequence of words (basically a language model).
# coding: utf-8

# In[68]:
# Importing stuff
import theano
import theano.tensor as T
import numpy as np

# In[69]:
import nltk
import sys
import operator
import csv
import itertools
from utils import *
from datetime import datetime

# In[70]:
# Fixing vocabulary size for one hot vectors and some initialization stuff
v_size = 8000
unknown_token = "UNKNOWN_TOKEN"
start_token = "<s>"
end_token = "</s>"

# In[71]:
# Read data and start preprocessing
with open('reddit-comments-2015-08.csv', 'rb') as f:
    reader = csv.reader(f, skipinitialspace=True)
    reader.next()
    sentences = list(itertools.chain(*[nltk.sent_tokenize(x[0].decode('utf-8')) for x in reader]))
print len(sentences)

# In[72]:
# Tokenize the sentences and add start and end tokens
tokenized_sentences = [nltk.word_tokenize(s) for s in sentences]
tokenized_sentences = [[start_token] + s + [end_token] for s in tokenized_sentences]

# In[73]:
# Get word frequencies and use only the most frequent words in the vocabulary
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
vocab = word_freq.most_common(v_size - 1)

# In[74]:
# Do mapping and reverse mapping
index_to_word = [x[0] for x in vocab]
index_to_word.append(unknown_token)
word_to_index = {w: i for i, w in enumerate(index_to_word)}

# Removing less frequent words
for i, s in enumerate(tokenized_sentences):
    tokenized_sentences[i] = [w if w in word_to_index else unknown_token for w in s]

# Got vectors but they are not one hot
X_train = np.asarray([[word_to_index[w] for w in s[:-1]] for s in tokenized_sentences])
Y_train = np.asarray([[word_to_index[w] for w in s[1:]] for s in tokenized_sentences])
# Preprocessing ends here

# In[75]:
# Take only one sentence for now
X_train = X_train[0]
Y_train = Y_train[0]

# In[76]:
# Make input and output one-hot vectors. This can easily be replaced with vectors generated by word2vec.
X_train_onehot = np.eye(v_size)[X_train]
X = theano.shared(np.array(X_train_onehot).astype('float32'), name='X')
Y_train_onehot = np.eye(v_size)[Y_train]
Y = theano.shared(np.array(Y_train_onehot).astype('float32'), name='Y')

# In[77]:
# Initializing U, V and W
i_dim = v_size
h_dim = 100
o_dim = v_size
U = theano.shared(np.random.randn(i_dim, h_dim).astype('float32'), name='U')
W = theano.shared(np.random.randn(h_dim, h_dim).astype('float32'), name='W')
V = theano.shared(np.random.randn(h_dim, o_dim).astype('float32'), name='V')

# In[78]:
# Forward propagation
s = T.vector('s')

results, updates = theano.scan(lambda x, sm1: T.tanh(T.dot(x, U) + T.dot(sm1, W)),
                               sequences=X_train_onehot,
                               outputs_info=s)
y_hat = T.dot(results, V)

forward_propagation = theano.function(inputs=[s], outputs=y_hat)

# In[80]:
# Loss
loss = T.sum(T.nnet.categorical_crossentropy(y_hat, Y))

# In[81]:
# Gradients
dw = T.grad(loss, W)
du = T.grad(loss, U)
dv = T.grad(loss, V)

# In[82]:
# BPTT
learning_rate = T.scalar('learning_rate')
gradient_step = theano.function(inputs=[s, learning_rate],
                                updates=(
                                    (U, U - learning_rate * du),
                                    (V, V - learning_rate * dv),
                                    (W, W - learning_rate * dw)
                                ))

# In[ ]:
But it keeps throwing an error at the gradient step. I am posting the full code because I don't know which step causes the error. (A screenshot of the error from the Jupyter notebook was attached.)
I solved it. The problem was a mismatch of types: I had to cast du, dv, dw and the learning rate to float32. By default, they are float64.
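In code, the fix amounts to something like this (a sketch; the casts are the only point):

# Cast the gradients and the learning rate to float32 so the update
# expressions keep the dtype of the float32 shared variables.
du = T.cast(T.grad(loss, U), 'float32')
dv = T.cast(T.grad(loss, V), 'float32')
dw = T.cast(T.grad(loss, W), 'float32')
learning_rate = T.scalar('learning_rate', dtype='float32')

gradient_step = theano.function(
    inputs=[s, learning_rate],
    updates=((U, U - learning_rate * du),
             (V, V - learning_rate * dv),
             (W, W - learning_rate * dw)))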
