Recording multiple meshcat visualizers - drake

I would like to record multiple meshcat visualizers with different prefix ids so I can eventually save them to an HTML file that I can interact with later. I would like the different visualizations to play on top of each other, and to be able to select/unselect individual visualizations.
Here is a minimal reproduction of the problem. I expect two cart-pole visualizations playing on top of each other. However, only one of them ends up being recorded; the other cart-pole appears stuck at its final position when I play back the recording.
import meshcat
from pydrake.common import FindResourceOrThrow

from pydrake.multibody.plant import (
    AddMultibodyPlantSceneGraph)
from pydrake.multibody.parsing import Parser
from pydrake.systems.analysis import Simulator
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.meshcat_visualizer import (
    ConnectMeshcatVisualizer,
)

v = meshcat.Visualizer()
file_name = FindResourceOrThrow(
    "drake/examples/multibody/cart_pole/cart_pole.sdf")
builder = DiagramBuilder()
cart_pole, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0)
Parser(plant=cart_pole).AddModelFromFile(file_name)
cart_pole.Finalize()

vis = ConnectMeshcatVisualizer(builder=builder,
                               prefix="vis",
                               scene_graph=scene_graph,
                               open_browser=False)

vis2 = ConnectMeshcatVisualizer(
    builder=builder,
    prefix="vis2",
    scene_graph=scene_graph,
    open_browser=False)
vis2.set_name("vis2")

diagram = builder.Build()
diagram_context = diagram.CreateDefaultContext()

cart_pole_context = diagram.GetMutableSubsystemContext(
    cart_pole, diagram_context)
cart_pole.get_actuation_input_port().FixValue(cart_pole_context, .02)

simulator = Simulator(diagram, diagram_context)
simulator.set_publish_every_time_step(False)
vis.start_recording()
vis2.start_recording()
simulator.AdvanceTo(100)
vis.stop_recording()
vis2.stop_recording()

vis.publish_recording()
vis2.publish_recording()

# f = open("saved.html", "w")
# f.write(v.static_html())
# f.close()

Here is one solution. I don't love it, but it accomplishes the stated goal. Note the changes:
- saving and publishing only one animation
- naming the model in the MultibodyPlant with a distinct name for each sim
- setting delete_prefix_on_load=False so it doesn't clear the old geometry
import meshcat
from pydrake.common import FindResourceOrThrow
from pydrake.multibody.plant import (AddMultibodyPlantSceneGraph)
from pydrake.multibody.parsing import Parser
from pydrake.systems.analysis import Simulator
from pydrake.systems.framework import DiagramBuilder
from pydrake.systems.meshcat_visualizer import (
    ConnectMeshcatVisualizer,
)

animation = None
for i in range(3):
    file_name = FindResourceOrThrow(
        "drake/examples/multibody/cart_pole/cart_pole.sdf")
    builder = DiagramBuilder()
    cart_pole, scene_graph = AddMultibodyPlantSceneGraph(builder, 0.0)
    Parser(plant=cart_pole).AddModelFromFile(file_name, f"plant{i}")
    cart_pole.Finalize()
    vis = ConnectMeshcatVisualizer(builder=builder,
                                   scene_graph=scene_graph,
                                   delete_prefix_on_load=False,
                                   open_browser=False)
    if animation:
        vis._animation = animation
    diagram = builder.Build()
    diagram_context = diagram.CreateDefaultContext()
    cart_pole_context = diagram.GetMutableSubsystemContext(
        cart_pole, diagram_context)
    cart_pole.get_actuation_input_port().FixValue(cart_pole_context, .02)
    simulator = Simulator(diagram, diagram_context)
    simulator.set_publish_every_time_step(False)
    vis.start_recording()
    simulator.AdvanceTo(10)
    vis.stop_recording()
    animation = vis._animation

vis.publish_recording()
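To finish the stated goal, the combined recording can then be written out as an interactive HTML file. This is a hedged sketch, assuming the Drake visualizer exposes its underlying meshcat.Visualizer as vis.vis (as the old meshcat_visualizer module did):

# Hedged sketch: dump the scene, including the single combined
# animation, to a standalone HTML file you can open and scrub later.
with open("saved.html", "w") as f:
    f.write(vis.vis.static_html())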

Related

NameError: name 'FileCheckpointManager' is not defined while saving model

After simulating this federated learning code for image classification, I would like to save my model, so I added these two lines:
ckpt_manager = FileCheckpointManager("model.h5")
ckpt_manager.save_checkpoint(ServerState.from_anon_tuple(state), round_num=2)
So here is all my code:
import collections
import time
import tensorflow as tf
tf.compat.v1.enable_v2_behavior()
import tensorflow_federated as tff

source, _ = tff.simulation.datasets.emnist.load_data()

def map_fn(example):
    return collections.OrderedDict(
        x=tf.reshape(example['pixels'], [-1, 784]), y=example['label'])

def client_data(n):
    ds = source.create_tf_dataset_for_client(source.client_ids[n])
    return ds.repeat(10).shuffle(500).batch(20).map(map_fn)

train_data = [client_data(n) for n in range(10)]
element_spec = train_data[0].element_spec

def model_fn():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Input(shape=(784,)),
        tf.keras.layers.Dense(units=10, kernel_initializer='zeros'),
        tf.keras.layers.Softmax(),
    ])
    return tff.learning.from_keras_model(
        model,
        input_spec=element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

trainer = tff.learning.build_federated_averaging_process(
    model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.02))
....
NUM_ROUNDS = 11
for round_num in range(2, NUM_ROUNDS):
    state, metrics = trainer.next(state, federated_train_data)
    print('round {:2d}, metrics={}'.format(round_num, metrics))
ckpt_manager = FileCheckpointManager("model.h5")
ckpt_manager.save_checkpoint(ServerState.from_anon_tuple(state), round_num=9)
But this error appears:
NameError: name 'FileCheckpointManager' is not defined
I would appreciate it if you could tell me how to solve this problem.
Looks like the code is missing an import for the module with the checkpoint manager.
FileCheckpointManager is defined in the checkpoint_manager module here: tensorflow_federated/python/research/utils/checkpoint_manager.py.
Try adding an import at the top of the file like this (the following example assumes the tensorflow federated github repo is in the import search path):
from tensorflow_federated.python.research.utils import checkpoint_manager
# ...
ckpt_manager = checkpoint_manager.FileCheckpointManager("model.h5")
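As a hedged follow-up sketch (the exact signatures live in the research/utils module and may differ across TFF versions): the manager treats its argument as a directory to write checkpoints into, so a directory name is a safer choice than "model.h5", and the latest saved round can be restored afterwards. The variables state and round_num are the ones from the training loop above; load_latest_checkpoint's signature is an assumption based on that module.

# Hedged sketch: save into a directory and restore the latest round.
from tensorflow_federated.python.research.utils import checkpoint_manager

ckpt_manager = checkpoint_manager.FileCheckpointManager("checkpoints")
ckpt_manager.save_checkpoint(state, round_num=round_num)
state, last_round = ckpt_manager.load_latest_checkpoint(state)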

OpenAI gym breakout-ram-v4 unable to learn

I am using Q-learning, and the program should be able to play the game after some tries, but it is not learning even when the epsilon value is 0.1.
I have tried changing the batch size and the memory size. I have also changed the code to give a -1 reward if the player dies.
import gym
import numpy as np
import random
import tensorflow as tf
import keyboard
import sys
import time

env = gym.make("Breakout-ram-v4")
observationSpace = env.observation_space
actionSpace = env.action_space
episode = 500

class Model_QNN:
    def __init__(self):
        self.memory = []
        self.MAX_MEMORY_TO_USE = 60_000
        self.gamma = 0.9
        self.model = tf.keras.Sequential([
            tf.keras.layers.Flatten(input_shape=(128, 1)),
            tf.keras.layers.Dense(256, activation="relu"),
            tf.keras.layers.Dense(64, activation="relu"),
            tf.keras.layers.Dense(actionSpace.n, activation="softmax")
        ])
        self.model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])

    def remember(self, steps, done):
        self.memory.append([steps, done])
        if len(self.memory) >= self.MAX_MEMORY_TO_USE:
            del self.memory[0]

    def replay(self, batch_size=32):
        if len(self.memory) < batch_size:
            return
        mini = random.sample(self.memory, batch_size)
        states, targets = [], []
        for steps, done in mini:
            target = steps[2]
            if not done:
                target = steps[2] + (self.gamma * np.amax(self.model.predict(steps[3].reshape(1, 128, 1))[0]))
            target_f = self.model.predict(steps[0].reshape(1, 128, 1))
            target_f[0][steps[1]] = target
            states.append(steps[0])
            targets.append(target_f[0])
        self.model.fit(np.array(states).reshape(len(states), 128, 1), np.array(targets), verbose=0, epochs=10)

    def act(self, state, ep):
        if random.random() < ep:
            action = actionSpace.sample()
        else:
            action = self.model.predict(state.reshape(1, 128, 1))
            action = np.argmax(action)
        return action

    def saveModel(self):
        print("Saving")
        self.model.save("NEWNAMEDONE")

    def saveBackup(self, num):
        self.model.save("NEWNAME" + str(int(num)))

def main():
    agent = Model_QNN()
    epsilon = 0.9
    t_end = time.time()
    score = 0
    for e in range(2000):
        print("Working on episode : " + str(e) + " eps " + str(epsilon) + " Score " + str(score))
        preState = env.reset()
        preState, reward, done, _ = env.step(1)
        mainLife = 5
        done = False
        score = 0
        icount = 0
        render = False
        if e % 400 == 0 and not e == 0:
            render = True
        while not done:
            icount += 1
            if render:
                env.render()
            if keyboard.is_pressed('q'):
                agent.saveBackup(100)
                agent.saveModel()
                quit()
            rewrd = 0
            if _["ale.lives"] < mainLife:
                mainLife -= 1
                rewrd = -1
                action = 1
            else:
                action = agent.act(preState, epsilon)
            newState, reward, done, _ = env.step(action)
            if rewrd == -1:
                reward = -1
            agent.remember([preState / 255, action, reward, newState / 255], done)
            preState = newState
            score += reward
            if done:
                break
        agent.replay(1024)
        if epsilon >= 0.18:
            epsilon = epsilon * 0.995
        if (e + 1) % 500 == 0:
            agent.saveBackup((e + 1) / 20)
            agent.saveModel()

if __name__ == '__main__':
    main()
There is no error message; the program should learn, but it does not.
Why are you using Softmax on your output layer?
If you want to use Softmax, use cross-entropy as your loss. However, it looks like you're trying to implement a value-based learning system, so the activation function on your output layer should be linear.
I suggest you try your implementation on CartPole-v0, then LunarLander-v2, first.
Those are solved environments and a great place to sanity-check your code.
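For reference, a minimal sketch of the value-based head described above, keeping the question's 128-byte RAM input shape but swapping the softmax output for a linear one (one unbounded Q-value per action, trained with MSE):

import tensorflow as tf

def build_q_network(n_actions):
    # Q-values are unbounded regression targets, so the output layer is
    # linear and the loss is MSE; no softmax, no cross-entropy.
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(128, 1)),
        tf.keras.layers.Dense(256, activation="relu"),
        tf.keras.layers.Dense(64, activation="relu"),
        tf.keras.layers.Dense(n_actions, activation="linear"),
    ])
    model.compile(optimizer="adam", loss="mse")
    return model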
"There is no error message the program should learn and it is not."
Welcome to ML where things fail silently.

Tesseract fails to parse text from image

I'm completely new to OpenCV and Tesseract.
I spent all day trying to write code that would parse the game duration from images like this: original image (the game duration is in the top left corner).
I came up with code that manages to recognize the duration sometimes (about 40% of all cases). Here it is:
try:
    from PIL import Image
except ImportError:
    import Image
import os
import cv2
import pytesseract
import re
import json

def non_digit_split(s):
    return filter(None, re.split(r'(\d+)', s))

def time_to_sec(min, sec):
    return (int(min) * 60 + int(sec)).__str__()

def process_img(image_url):
    img = cv2.resize(cv2.imread('./images/' + image_url), None, fx=5, fy=5, interpolation=cv2.INTER_CUBIC)
    str = pytesseract.image_to_string(img)
    if "WIN " in str:
        time = list(non_digit_split(str.split("WIN ", 1)[1][0:6].strip()))
        str = time_to_sec(time[0], time[2])
    else:
        str = 'Not recognized'
    return str

res = {}
img_list = os.listdir('./images')
print(img_list)
for i in img_list:
    res[i] = process_img(i)

with open('output.txt', 'w') as file:
    file.write(json.dumps(res))
Don't even ask how I arrived at resizing the image, but it helped a little.
I also tried cropping the image first, like this:
cropped image
but Tesseract couldn't find any text there.
I'm sure the issue I'm trying to solve is pretty easy. Can you please point me in the right direction? How should I preprocess the image so Tesseract parses it correctly?
Thanks to DmitriiZ's comment, I managed to produce a working piece of code.
I made a preprocessor that outputs something like this:
Preprocessed image
Tesseract handles it just fine.
Here is the full code:
try:
    from PIL import Image
except ImportError:
    import Image
import os
import pytesseract
import json

def is_dark(image):
    pixels = image.getdata()
    black_thresh = 100
    nblack = 0
    for pixel in pixels:
        if (sum(pixel) / 3) < black_thresh:
            nblack += 1
    n = len(pixels)
    if (nblack / float(n)) > 0.5:
        return True
    else:
        return False

def preprocess(img):
    basewidth = 500
    wpercent = (basewidth / float(img.size[0]))
    hsize = int((float(img.size[1]) * float(wpercent)))
    # Enlarging image
    img = img.resize((basewidth, hsize), Image.ANTIALIAS)
    # Converting image to black and white
    img = img.convert("1", dither=Image.NONE)
    return img

def process_img(image_url):
    img = Image.open('./images/' + image_url)
    # Area we need to crop can be found in one of two different areas,
    # depending on which team won. You can replace that block and the
    # is_dark() function by just img.crop().
    top_area = (287, 15, 332, 32)
    crop = img.crop(top_area)
    if is_dark(crop):
        bot_area = (287, 373, 332, 390)
        crop = img.crop(bot_area)
    img = preprocess(crop)
    str = pytesseract.image_to_string(img)
    return str

res = {}
img_list = os.listdir('./images')
print(img_list)
for i in img_list:
    res[i] = process_img(i)

with open('output.txt', 'w') as file:
    file.write(json.dumps(res))
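One extra knob, not from the original answer but often useful on short crops like this: Tesseract's page segmentation mode. Passing --psm 7 tells it to treat the image as a single line of text, which matches the cropped duration string:

# Hedged addition: hint that the crop is a single text line (PSM 7).
str = pytesseract.image_to_string(img, config='--psm 7')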

Ardrone motion calibration based on feet-based distance

I have been working with the ARDrone for a while. Is there any way to automate the drone based on distances measured in feet?
Let's say I have to fly the drone exactly 6 feet from the ground. How can I do that?
I have some rostopics. How do I use them to fly the drone to exactly 6 feet?
#flyup
rostopic pub /cmd_vel geometry_msgs/Twist '{linear: {x: 0.0, y: 0.0, z: 1.0}, angular: {x: 0.0, y: 0.0, z: 0.0}}'
I am using a loop here to repeat the same action a couple of times. How can I calibrate it to 6 ft with rostopics?
Please check the code.
#!/usr/bin/env python
# import ROS libraries
import rospy
import time
from geometry_msgs.msg import Twist
from std_msgs.msg import String
from std_msgs.msg import Empty
from ardrone_autonomy.msg import Navdata
# import the status class to determine the status of the quadcopter
#from drone_status import DroneStatus

#COMMAND_PERIOD = 1000

class AutonomousFlight():
    def __init__(self):
        self.status = ""
        rospy.init_node('forward', anonymous=False)
        self.rate = rospy.Rate(1)
        self.pubTakeoff = rospy.Publisher("ardrone/takeoff", Empty, queue_size=10)
        self.pubLand = rospy.Publisher("ardrone/land", Empty, queue_size=10)
        self.pubCommand = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self.command = Twist()
        #self.commandTimer = rospy.Timer(rospy.Duration(COMMAND_PERIOD/1000.0), self.SendCommand)
        self.state_change_time = rospy.Time.now()
        rospy.on_shutdown(self.SendLand)

    def TakeOff(self):
        self.pubTakeoff.publish(Empty())
        self.rate.sleep()

    def SendLand(self):
        self.pubLand.publish(Empty())

    def SetCommand(self, linear_x, linear_y, linear_z, angular_x, angular_y, angular_z):
        self.command.linear.x = linear_x
        self.command.linear.y = linear_y
        self.command.linear.z = linear_z
        self.command.angular.x = angular_x
        self.command.angular.y = angular_y
        self.command.angular.z = angular_z
        self.pubCommand.publish(self.command)
        self.rate.sleep()

if __name__ == '__main__':
    try:
        i = 0
        distance = 0
        uav = AutonomousFlight()
        while not rospy.is_shutdown():
            uav.TakeOff()
            if i <= 3:
                uav.SetCommand(0, 0, 1, 0, 0, 0)
                i += 1
            elif i <= 4:
                uav.SetCommand(0, 0, 0, 0, 0, 0)
                i += 1
            else:
                break
        uav.SendLand()
    except rospy.ROSInterruptException:
        pass
With the above code, the drone just has to take off and fly up to 6 feet... how can I do that?
Regards,
Kathir
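A hedged sketch of one possible approach, not a tested solution: ardrone_autonomy publishes Navdata on the /ardrone/navdata topic, and its altd field reports the estimated altitude in millimeters, so the node can keep climbing until altd reaches roughly 1829 mm (6 ft × 304.8 mm/ft). The topic and field names are taken from ardrone_autonomy's Navdata message; the climb rate and sleeps are placeholders to tune.

#!/usr/bin/env python
# Sketch: climb until Navdata reports ~6 ft (1829 mm), then hover.
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import Empty
from ardrone_autonomy.msg import Navdata

TARGET_MM = int(6 * 304.8)  # 6 feet expressed in millimeters

class AltitudeHold(object):
    def __init__(self):
        rospy.init_node('altitude_hold', anonymous=False)
        self.altd = 0
        rospy.Subscriber('/ardrone/navdata', Navdata, self.on_navdata)
        self.pubTakeoff = rospy.Publisher('ardrone/takeoff', Empty, queue_size=10)
        self.pubCommand = rospy.Publisher('cmd_vel', Twist, queue_size=10)

    def on_navdata(self, msg):
        # altd is the onboard altitude estimate in millimeters.
        self.altd = msg.altd

    def climb_to_target(self):
        rate = rospy.Rate(10)
        cmd = Twist()
        while not rospy.is_shutdown() and self.altd < TARGET_MM:
            cmd.linear.z = 0.5   # keep climbing
            self.pubCommand.publish(cmd)
            rate.sleep()
        cmd.linear.z = 0.0       # hover once the target height is reached
        self.pubCommand.publish(cmd)

if __name__ == '__main__':
    try:
        uav = AltitudeHold()
        rospy.sleep(1.0)  # give the publishers time to connect
        uav.pubTakeoff.publish(Empty())
        uav.climb_to_target()
    except rospy.ROSInterruptException:
        pass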

Code does nothing when I run it (ROS, OpenCV)

So here's my problem. I'm using ROS and OpenCV, trying to create a depth map using two cameras. However, the code that I wrote does not seem to do anything, and I'm a little confused about why. (Previous code that I've run had the same structure.)
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('test_cam')
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import message_filters

class image_converter:
    def __init__(self):
        self.bridge = CvBridge()
        self.image_sub_1 = message_filters.Subscriber("/cameras/left_hand_camera/image", Image)
        self.image_sub_2 = message_filters.Subscriber("/cameras/head_camera/image", Image)
        self.ts = message_filters.TimeSynchronizer([self.image_sub_1, self.image_sub_2], 10)
        self.ts.registerCallback(self.callback)

    def callback(self, Image):
        try:
            cv_image_1 = self.bridge.imgmsg_to_cv2(Image, "bgr8")
            cv_image_2 = self.bridge.imgmsg_to_cv2(Image, "bgr8")
        except CvBridgeError as e:
            print(e)
        stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
        disparity = stereo.compute(cv_image_1, cv_image_2)
        plt.imshow(disparity, 'gray')
        plt.show()
        plt.waitKey(1)

def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)
You must call rospy.init_node before subscribing to any topic.
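A minimal sketch of that fix applied to the question's main(): initialize the node before constructing image_converter, since the message_filters.Subscriber objects created in __init__ need an initialized node to register with.

def main(args):
    # Initialize the node first; the Subscribers created inside
    # image_converter.__init__ need a running node to register with.
    rospy.init_node('image_converter', anonymous=True)
    ic = image_converter()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()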