diff --git a/active_grasp/controller.py b/active_grasp/controller.py
index 1653a89..569634e 100644
--- a/active_grasp/controller.py
+++ b/active_grasp/controller.py
@@ -155,6 +155,7 @@ class GraspController:
             "view_count": len(points),
             "distance": d,
         }
+        info.update(self.policy.info)
         info.update(Timer.timers)
         return info
 
diff --git a/active_grasp/nbv.py b/active_grasp/nbv.py
index 93d4a66..c195814 100644
--- a/active_grasp/nbv.py
+++ b/active_grasp/nbv.py
@@ -1,6 +1,7 @@
 import itertools
 import numpy as np
 import rospy
+from .timer import Timer
 
 from .policy import MultiViewPolicy
 
@@ -12,7 +13,9 @@ class NextBestView(MultiViewPolicy):
 
     def activate(self, bbox, view_sphere):
         super().activate(bbox, view_sphere)
-        self.generate_view_candidates()
+        with Timer("view_generation"):
+            self.generate_view_candidates()
+        self.info["view_candidates_count"] = len(self.view_candidates)
 
     def generate_view_candidates(self):
         thetas = np.deg2rad([15, 30, 45])
@@ -27,10 +30,13 @@ class NextBestView(MultiViewPolicy):
         if len(self.views) > self.max_views or self.best_grasp_prediction_is_stable():
             self.done = True
         else:
-            self.integrate(img, x)
+            with Timer("state_update"):
+                self.integrate(img, x)
             views = self.view_candidates
-            gains = [self.ig_fn(v) for v in views]
-            costs = [self.cost_fn(v) for v in views]
+            with Timer("ig_computation"):
+                gains = [self.ig_fn(v) for v in views]
+            with Timer("cost_computation"):
+                costs = [self.cost_fn(v) for v in views]
             utilities = gains / np.sum(gains) - costs / np.sum(costs)
             self.vis.views(self.base_frame, self.intrinsic, views, utilities)
             i = np.argmax(utilities)
diff --git a/active_grasp/policy.py b/active_grasp/policy.py
index 02bf507..a98075c 100644
--- a/active_grasp/policy.py
+++ b/active_grasp/policy.py
@@ -3,6 +3,7 @@ from sensor_msgs.msg import CameraInfo
 from pathlib import Path
 import rospy
+from .timer import Timer
 
 from .visualization import Visualizer
 from robot_helpers.model import KDLModel
 from robot_helpers.ros import tf
@@ -52,6 +53,7 @@ class Policy:
         self.best_grasp = None
         self.x_d = None
         self.done = False
+        self.info = {}
 
     def calibrate_task_frame(self):
         self.T_base_task = Transform.translation(self.bbox.center - np.full(3, 0.15))
@@ -118,28 +120,30 @@ class MultiViewPolicy(Policy):
 
     def integrate(self, img, x):
         self.views.append(x)
-        self.tsdf.integrate(img, self.intrinsic, x.inv() * self.T_base_task)
-
-        self.vis.scene_cloud(self.task_frame, self.tsdf.get_scene_cloud())
-        self.vis.map_cloud(self.task_frame, self.tsdf.get_map_cloud())
         self.vis.path(self.base_frame, self.views)
 
-        tsdf_grid, voxel_size = self.tsdf.get_grid(), self.tsdf.voxel_size
-        out = self.vgn.predict(tsdf_grid)
+        with Timer("tsdf_integration"):
+            self.tsdf.integrate(img, self.intrinsic, x.inv() * self.T_base_task)
+        self.vis.scene_cloud(self.task_frame, self.tsdf.get_scene_cloud())
+        self.vis.map_cloud(self.task_frame, self.tsdf.get_map_cloud())
+
+        with Timer("grasp_prediction"):
+            tsdf_grid, voxel_size = self.tsdf.get_grid(), self.tsdf.voxel_size
+            out = self.vgn.predict(tsdf_grid)
         self.vis.quality(self.task_frame, self.tsdf.voxel_size, out.qual, 0.5)
 
         t = (len(self.views) - 1) % self.T
         self.qual_hist[t, ...] = out.qual
 
-        grasps = select_grid(voxel_size, out, threshold=self.qual_threshold)
-        grasps, scores = self.sort_grasps(grasps)
+        with Timer("grasp_selection"):
+            grasps = select_grid(voxel_size, out, threshold=self.qual_threshold)
+            grasps, scores = self.sort_grasps(grasps)
+        self.vis.grasps(self.base_frame, grasps, scores)
 
         if len(grasps) > 0:
             self.best_grasp = grasps[0]
             self.vis.best_grasp(self.base_frame, grasps[0], scores[0])
-            self.vis.grasps(self.base_frame, grasps, scores)
-
 
 
 def compute_error(x_d, x):
     linear = x_d.translation - x.translation
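Note: the added "from .timer import Timer" imports refer to active_grasp/timer.py, which is not part of this diff. From the way it is used here (with Timer("..."): blocks, plus the Timer.timers lookup in controller.py), it is presumably a small context manager that accumulates per-name wall-clock durations in a class-level dict. A minimal sketch under that assumption, not the repository's actual implementation:

# Hypothetical sketch of active_grasp/timer.py, inferred from its usage in this diff.
import time


class Timer:
    # name -> accumulated elapsed seconds, shared across all instances
    timers = {}

    def __init__(self, name):
        self.name = name
        self.timers.setdefault(name, 0.0)

    def __enter__(self):
        # record the start time when the `with` block is entered
        self._start = time.perf_counter()
        return self

    def __exit__(self, *exc_info):
        # add this block's elapsed time to the running total for `name`
        self.timers[self.name] += time.perf_counter() - self._start

With an implementation along these lines, info.update(Timer.timers) in GraspController merges the accumulated stage timings (view_generation, state_update, ig_computation, cost_computation, tsdf_integration, grasp_prediction, grasp_selection) into the logged run info, alongside the per-policy entries collected in self.policy.info such as view_candidates_count.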