Draw camera path
commit d431a95b5a
parent 0cdeaa34f3
@@ -5,30 +5,28 @@ import argparse
 import cv_bridge
 import numpy as np
 import rospy
 import tf2_ros

 import control_msgs.msg
 import controller_manager_msgs.srv
 import franka_gripper.msg
 from geometry_msgs.msg import Pose
 from sensor_msgs.msg import JointState, Image, CameraInfo
 import std_srvs.srv

-from robot_tools.controllers import CartesianPoseController
-from robot_tools.ros.conversions import *
-from robot_tools.ros.tf import TF2Client
+from robot_utils.controllers import CartesianPoseController
+from robot_utils.ros.conversions import *
 from simulation import Simulation


 class BtSimNode:
     def __init__(self, gui):
-        self.sim = Simulation(gui=gui, sleep=False)
+        self.sim = Simulation(gui=gui)

         self.controller_update_rate = 60
         self.joint_state_publish_rate = 60
         self.camera_publish_rate = 5

         self.cv_bridge = cv_bridge.CvBridge()
-        self.tf_tree = TF2Client()

         self.setup_robot_topics()
         self.setup_camera_topics()
@@ -44,13 +42,11 @@ class BtSimNode:
         self.joint_pub = rospy.Publisher("joint_states", JointState, queue_size=10)

     def setup_controllers(self):
-        self.cartesian_pose_controller = CartesianPoseController(
-            self.sim.arm, stopped=False
-        )
+        self.cartesian_pose_controller = CartesianPoseController(self.sim.arm)
         rospy.Subscriber("command", Pose, self.target_pose_cb)

     def target_pose_cb(self, msg):
-        self.cartesian_pose_controller.set_target(from_pose_msg(msg))
+        self.cartesian_pose_controller.x_d = from_pose_msg(msg)

     def setup_gripper_interface(self):
         self.gripper_interface = GripperInterface(self.sim.gripper)
@@ -66,6 +62,8 @@ class BtSimNode:
         self.depth_pub = rospy.Publisher("/cam/depth/image_raw", Image, queue_size=10)

     def broadcast_transforms(self):
+        self.static_broadcaster = tf2_ros.StaticTransformBroadcaster()
+
         msgs = []
         msg = geometry_msgs.msg.TransformStamped()
         msg.header.stamp = rospy.Time.now()
@@ -82,13 +80,13 @@ class BtSimNode:
         msgs.append(msg)

-        self.tf_tree.static_broadcaster.sendTransform(msgs)
+        self.static_broadcaster.sendTransform(msgs)

     def reset(self, req):
         self.reset_requested = True
         rospy.sleep(1.0)  # wait for the latest sim step to finish
         self.sim.reset()
-        self.cartesian_pose_controller.set_target(self.sim.arm.pose())
+        self.cartesian_pose_controller.x_d = self.sim.arm.pose()
         self.step_cnt = 0
         self.reset_requested = False
         return std_srvs.srv.TriggerResponse(success=True)
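
Note (not part of the diff): after this change a client drives the arm simply by publishing a geometry_msgs/Pose on the node's "command" topic; target_pose_cb converts it and assigns it to the controller's x_d. A minimal sketch of such a client, where the node name, namespace, and target values are placeholders:

import rospy
from geometry_msgs.msg import Pose

rospy.init_node("command_client_example")  # hypothetical client node
pub = rospy.Publisher("command", Pose, queue_size=10)
rospy.sleep(1.0)  # give the connection time to establish

target = Pose()
target.position.x, target.position.y, target.position.z = 0.4, 0.0, 0.4  # placeholder pose
target.orientation.w = 1.0
pub.publish(target)  # BtSimNode.target_pose_cb writes this into cartesian_pose_controller.x_d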
policies.py
@@ -7,14 +7,17 @@ import scipy.interpolate
 from geometry_msgs.msg import Pose
 from sensor_msgs.msg import Image, CameraInfo
 import std_srvs.srv
+from visualization_msgs.msg import Marker, MarkerArray

+from robot_utils.perception import *
+from robot_utils.spatial import Rotation, Transform
+from robot_utils.ros.conversions import *
+from robot_utils.ros.panda import PandaGripperClient
+from robot_utils.ros import tf
+from robot_utils.ros.rviz import *
+import vgn.vis
-from robot_tools.spatial import Rotation, Transform
-from robot_tools.ros.conversions import *
-from robot_tools.ros.control import ControllerManagerClient
-from robot_tools.ros.panda import PandaGripperClient
-from robot_tools.ros.tf import TF2Client
-from robot_tools.perception import *
-from vgn import vis
 from vgn.detection import VGN, compute_grasps
@@ -31,50 +34,19 @@ def get_controller(name):

 class BaseController:
     def __init__(self):
-        self.frame_id = rospy.get_param("~frame_id")
+        self.frame = rospy.get_param("~frame_id")
         self.length = rospy.get_param("~length")

         self.cv_bridge = cv_bridge.CvBridge()
-        self.tf = TF2Client()

         self.reset_client = rospy.ServiceProxy("/reset", std_srvs.srv.Trigger)

         self.tsdf = UniformTSDFVolume(0.3, 40)
         self.vgn = VGN(Path(rospy.get_param("vgn/model")))

-        self.setup_robot_connection()
-        self.setup_camera_connection()
-        self.lookup_transforms()
-
-    def setup_robot_connection(self):
-        self.base_frame_id = rospy.get_param("~base_frame_id")
-        self.ee_frame_id = rospy.get_param("~ee_frame_id")
-        self.ee_grasp_offset = Transform.from_list(rospy.get_param("~ee_grasp_offset"))
-        self.target_pose_pub = rospy.Publisher("/command", Pose, queue_size=10)
-        self.gripper = PandaGripperClient()
-
-    def send_pose_command(self, target):
-        self.target_pose_pub.publish(to_pose_msg(target))
-
-    def setup_camera_connection(self):
-        self.cam_frame_id = rospy.get_param("~camera/frame_id")
-        info_topic = rospy.get_param("~camera/info_topic")
-        msg = rospy.wait_for_message(info_topic, CameraInfo, rospy.Duration(2.0))
-        self.intrinsic = from_camera_info_msg(msg)
-        depth_topic = rospy.get_param("~camera/depth_topic")
-        rospy.Subscriber(depth_topic, Image, self.sensor_cb, queue_size=1)
-
-    def sensor_cb(self, msg):
-        self.last_depth_img = self.cv_bridge.imgmsg_to_cv2(msg).astype(np.float32)
-        self.last_extrinsic = self.tf.lookup(
-            self.cam_frame_id,
-            self.frame_id,
-            msg.header.stamp,
-            rospy.Duration(0.1),
-        )
-
-    def lookup_transforms(self):
-        self.T_B_O = self.tf.lookup(self.base_frame_id, self.frame_id, rospy.Time.now())
+        self._setup_robot_connection()
+        self._setup_camera_connection()
+        self._setup_rviz_connection()
+        self._lookup_transforms()

     def run(self):
         self.reset()
@@ -82,70 +54,159 @@ class BaseController:
         self.execute_grasp()

     def reset(self):
-        vis.clear()
-        req = std_srvs.srv.TriggerRequest()
-        self.reset_client(req)
+        self._reset_env()
+        self._clear_rviz()
         rospy.sleep(1.0)  # wait for states to be updated
+        self._init_policy()

+        self.viewpoints = []
         self.done = False
         self.best_grasp = None

     def explore(self):
         r = rospy.Rate(self.rate)
         while not self.done:
-            self.update()
+            self._update()
             r.sleep()

-    def update(self):
-        raise NotImplementedError
-
     def execute_grasp(self):
         if not self.best_grasp:
             return

         grasp = self.best_grasp

         # Ensure that the camera is pointing forward.
         rot = grasp.pose.rotation
         if rot.as_matrix()[:, 0][0] < 0:
             grasp.pose.rotation = rot * Rotation.from_euler("z", np.pi)
-        target = self.T_B_O * grasp.pose * self.ee_grasp_offset.inv()
+        target = self.T_B_O * grasp.pose * self._ee_grasp_offset.inv()

         self.gripper.move(0.08)
-        self.send_pose_command(target)
+        self._send_pose_command(target)
         rospy.sleep(3.0)
         self.gripper.move(0.0)
         target.translation[2] += 0.3
-        self.send_pose_command(target)
+        self._send_pose_command(target)
         rospy.sleep(2.0)

-    def predict_best_grasp(self):
-        tsdf_grid = self.tsdf.get_grid()
-        out = self.vgn.predict(tsdf_grid)
-        score_fn = lambda g: g.pose.translation[2]
-        grasps = compute_grasps(self.tsdf.voxel_size, out, score_fn)
-        vis.draw_grasps(grasps, 0.05)
-        return grasps[0] if len(grasps) > 0 else None
+    def _setup_robot_connection(self):
+        self.base_frame = rospy.get_param("~base_frame_id")
+        self.ee_frame = rospy.get_param("~ee_frame_id")
+        self._ee_grasp_offset = Transform.from_list(rospy.get_param("~ee_grasp_offset"))
+        self.target_pose_pub = rospy.Publisher("/command", Pose, queue_size=10)
+        self.gripper = PandaGripperClient()
+
+    def _setup_camera_connection(self):
+        self._cam_frame_id = rospy.get_param("~camera/frame_id")
+        info_topic = rospy.get_param("~camera/info_topic")
+        msg = rospy.wait_for_message(info_topic, CameraInfo, rospy.Duration(2.0))
+        self.intrinsic = from_camera_info_msg(msg)
+        depth_topic = rospy.get_param("~camera/depth_topic")
+        rospy.Subscriber(depth_topic, Image, self._sensor_cb, queue_size=1)

-class SingleViewBaseline(BaseController):
-    def __init__(self):
-        super().__init__()
-        self.rate = 1
+    def _sensor_cb(self, msg):
+        self.last_depth_img = self.cv_bridge.imgmsg_to_cv2(msg).astype(np.float32)
+        self.last_extrinsic = tf.lookup(
+            self._cam_frame_id,
+            self.frame,
+            msg.header.stamp,
+            rospy.Duration(0.1),
+        )

-    def reset(self):
-        super().reset()
+    def _setup_rviz_connection(self):
+        self.path_pub = rospy.Publisher("path", MarkerArray, queue_size=1, latch=True)

-    def update(self):
+    def _lookup_transforms(self):
+        self.T_B_O = tf.lookup(self.base_frame, self.frame)

+    def _reset_env(self):
+        req = std_srvs.srv.TriggerRequest()
+        self.reset_client(req)

+    def _clear_rviz(self):
+        vgn.vis.clear()
+        self.path_pub.publish(DELETE_MARKER_ARRAY_MSG)

+    def _init_policy(self):
+        raise NotImplementedError

+    def _update(self):
+        raise NotImplementedError

+    def _draw_camera_path(self, frustum=False):
+        identity = Transform.identity()
+        color = np.r_[31, 119, 180] / 255.0

+        # Spheres for each viewpoint
+        scale = 0.01 * np.ones(3)
+        spheres = create_marker(Marker.SPHERE_LIST, self.frame, identity, scale, color)
+        spheres.id = 0
+        spheres.points = [to_point_msg(p.translation) for p in self.viewpoints]

+        # Line strip connecting viewpoints
+        scale = [0.005, 0.0, 0.0]
+        lines = create_marker(Marker.LINE_STRIP, self.frame, identity, scale, color)
+        lines.id = 1
+        lines.points = [to_point_msg(p.translation) for p in self.viewpoints]

+        markers = [spheres, lines]

+        # Frustums
+        if frustum:
+            for i, pose in enumerate(self.viewpoints):
+                msg = create_cam_marker(self.intrinsic, pose, self.frame)
+                msg.id = i + 2
+                markers.append(msg)

+        self.path_pub.publish(MarkerArray(markers))

+    def _draw_scene_cloud(self):
+        cloud = self.tsdf.get_scene_cloud()
+        vgn.vis.draw_points(np.asarray(cloud.points))

+    def _integrate_latest_image(self):
+        self.viewpoints.append(self.last_extrinsic.inv())
+        self.tsdf.integrate(
+            self.last_depth_img,
+            self.intrinsic,
+            self.last_extrinsic,
+        )
-        cloud = self.tsdf.get_scene_cloud()
-        vis.draw_points(np.asarray(cloud.points))
-        self.best_grasp = self.predict_best_grasp()

+    def _predict_best_grasp(self):
+        tsdf_grid = self.tsdf.get_grid()
+        out = self.vgn.predict(tsdf_grid)
+        score_fn = lambda g: g.pose.translation[2]
+        grasps = compute_grasps(self.tsdf.voxel_size, out, score_fn)
+        vgn.vis.draw_grasps(grasps, 0.05)
+        return grasps[0] if len(grasps) > 0 else None

+    def _send_pose_command(self, target):
+        self.target_pose_pub.publish(to_pose_msg(target))


+class SingleViewBaseline(BaseController):
+    """
+    Integrate a single image from the initial viewpoint.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.rate = 1
+
+    def _init_policy(self):
+        pass
+
+    def _update(self):
+        self._integrate_latest_image()
+        self._draw_scene_cloud()
+        self._draw_camera_path(frustum=True)
+        self.best_grasp = self._predict_best_grasp()
+        self.done = True


 class FixedTrajectoryBaseline(BaseController):
     """Follow a pre-defined circular trajectory."""

     def __init__(self):
         super().__init__()
         self.rate = 10
@@ -153,56 +214,33 @@ class FixedTrajectoryBaseline(BaseController):
         self.radius = 0.1
         self.m = scipy.interpolate.interp1d([0, self.duration], [np.pi, 3.0 * np.pi])

-    def reset(self):
-        super().reset()
+    def _init_policy(self):
         self.tic = rospy.Time.now()
-        timeout = rospy.Duration(0.1)
-        x0 = self.tf.lookup(self.base_frame_id, self.ee_frame_id, self.tic, timeout)
+        x0 = tf.lookup(self.base_frame, self.ee_frame)
         self.center = np.r_[x0.translation[0] + self.radius, x0.translation[1:]]
         self.target = x0

-    def update(self):
+    def _update(self):
         elapsed_time = (rospy.Time.now() - self.tic).to_sec()
         if elapsed_time > self.duration:
-            self.best_grasp = self.predict_best_grasp()
+            self.best_grasp = self._predict_best_grasp()
             self.done = True
         else:
-            self.tsdf.integrate(
-                self.last_depth_img,
-                self.intrinsic,
-                self.last_extrinsic,
-            )
-            cloud = self.tsdf.get_scene_cloud()
-            vis.draw_points(np.asarray(cloud.points))
+            # Update state
+            self._integrate_latest_image()

+            # Compute next viewpoint
             t = self.m(elapsed_time)
             self.target.translation = (
                 self.center
                 + np.r_[self.radius * np.cos(t), self.radius * np.sin(t), 0.0]
             )
-            self.send_pose_command(self.target)
+            self._send_pose_command(self.target)

-
-class Map:
-    def __init__(self):
-        pass
-
-    def update(self):
-        pass
+            # Draw
+            self._draw_scene_cloud()
+            self._draw_camera_path()


-class MultiViewPicking(BaseController):
-    def __init__(self):
-        super().__init__()
-        self.rate = 5
-        self.grid = np.zeros((40, 40, 40))
-
-    def reset(self):
-        super().reset()
-        self.tic = rospy.Time.now()
-        timeout = rospy.Duration(0.1)
-        x0 = self.tf.lookup(self.base_frame_id, self.ee_frame_id, self.tic, timeout)
-        self.center = np.r_[x0.translation[0] + self.radius, x0.translation[1:]]
-        self.target = x0
-
-    def update(self):
-        pass
+        pass
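
Note (not part of the diff): _draw_camera_path publishes the recorded viewpoints as a latched MarkerArray on the "path" topic: a sphere-list marker (id 0) for the camera positions, a line strip (id 1) connecting them, and optional frustum markers (ids 2 and up). The helpers create_marker, create_cam_marker, and DELETE_MARKER_ARRAY_MSG come from robot_utils.ros.rviz and are not shown in this diff; the sketch below approximates the sphere-list part with plain visualization_msgs types (the helper name and default field values are assumptions):

from geometry_msgs.msg import Point
from std_msgs.msg import ColorRGBA
from visualization_msgs.msg import Marker

def sphere_list_marker(frame_id, positions, diameter=0.01, rgb=(0.12, 0.47, 0.71)):
    # Rough stand-in for create_marker(Marker.SPHERE_LIST, frame, identity, scale, color).
    m = Marker()
    m.header.frame_id = frame_id
    m.type = Marker.SPHERE_LIST
    m.action = Marker.ADD
    m.pose.orientation.w = 1.0                    # identity pose
    m.scale.x = m.scale.y = m.scale.z = diameter  # sphere diameter in meters
    m.color = ColorRGBA(rgb[0], rgb[1], rgb[2], 1.0)
    m.points = [Point(x, y, z) for x, y, z in positions]
    return m

Viewing the result only requires adding a MarkerArray display for the "path" topic in RViz; since the publisher is latched, the last path stays visible until _clear_rviz publishes DELETE_MARKER_ARRAY_MSG (presumably a delete-all message) on reset.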
run.py
@@ -12,6 +12,7 @@ def create_parser():
         choices=[
             "single-view",
             "fixed-trajectory",
+            "mvp",
         ],
     )
     return parser
@@ -2,19 +2,13 @@ from pathlib import Path
 import pybullet as p
 import rospkg

-from robot_tools.bullet import *
-from robot_tools.spatial import Rotation, Transform
-from robot_tools.utils import scan_dir_for_urdfs
+from robot_utils.bullet import *
+from robot_utils.spatial import Rotation, Transform


 class Simulation(BtManipulationSim):
-    def __init__(self, gui=True, sleep=True):
-        super().__init__(gui, sleep)
-
-        self.rate = 60
-        self.dt = 1.0 / self.rate
-        p.setPhysicsEngineParameter(fixedTimeStep=self.dt, numSubSteps=4)
-
+    def __init__(self, gui=True):
+        super().__init__(gui=gui, sleep=False)
         p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
         p.resetDebugVisualizerCamera(1.4, 50, -35, [0.0, 0.0, 0.6])

@@ -25,7 +19,7 @@ class Simulation(BtManipulationSim):
     def find_object_urdfs(self):
         rospack = rospkg.RosPack()
         root = Path(rospack.get_path("vgn")) / "data/urdfs/packed/test"
-        self.urdfs = scan_dir_for_urdfs(root)
+        self.urdfs = [str(f) for f in root.iterdir() if f.suffix == ".urdf"]

     def add_table(self):
         p.loadURDF("plane.urdf")