nbv_reconstruction/utils/data_load.py

import os
import json

import cv2
import numpy as np
import trimesh


class DataLoadUtil:
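    """Static helpers for loading per-frame data (depth, RGB image, segmentation
    mask, and camera parameters) and for turning depth maps into point clouds."""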

    @staticmethod
    def get_path(root, scene_name, frame_idx):
        path = os.path.join(root, scene_name, f"{frame_idx}")
        return path

    @staticmethod
    def get_label_path(root, scene_name):
        path = os.path.join(root, scene_name, "label.json")
        return path

    @staticmethod
    def load_model_points(root, scene_name):
        # The sampled model points are stored as a plain-text array (one point
        # per row), so they are loaded with numpy rather than as a mesh.
        model_path = os.path.join(root, scene_name, "sampled_model_points.txt")
        model_points = np.loadtxt(model_path)
        return model_points
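
    # Depth is stored as a 16-bit PNG; the raw value is rescaled from
    # [0, 65535] to the metric range [min_depth, max_depth] defined below.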
    @staticmethod
    def load_depth(path):
        depth_path = os.path.join(os.path.dirname(path), "depth", os.path.basename(path) + ".png")
        depth = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
        depth = depth.astype(np.float32) / 65535.0
        min_depth = 0.01
        max_depth = 5.0
        depth_meters = min_depth + (max_depth - min_depth) * depth
        return depth_meters

    @staticmethod
    def load_label(path):
        with open(path, "r") as f:
            label_data = json.load(f)
        return label_data

    @staticmethod
    def load_rgb(path):
        rgb_path = os.path.join(os.path.dirname(path), "rgb", os.path.basename(path) + ".png")
        rgb_image = cv2.imread(rgb_path, cv2.IMREAD_COLOR)
        return rgb_image

    @staticmethod
    def load_seg(path):
        mask_path = os.path.join(os.path.dirname(path), "mask", os.path.basename(path) + ".png")
        mask_image = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        return mask_image
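
    # Note: the fixed transform below flips the Y and Z axes of the camera
    # frame. This is the usual conversion between a graphics-style camera
    # convention (e.g. Blender/OpenGL, +Y up, -Z forward) and the OpenCV
    # convention (+Y down, +Z forward); the exact source convention is an
    # assumption inferred from the offset matrix.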
    @staticmethod
    def cam_pose_transformation(cam_pose_before):
        offset = np.asarray([
            [1, 0, 0, 0],
            [0, -1, 0, 0],
            [0, 0, -1, 0],
            [0, 0, 0, 1],
        ])
        cam_pose_after = cam_pose_before @ offset
        return cam_pose_after
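
    # Camera parameters are read from a per-frame JSON file; the stored
    # extrinsic is treated as a camera-to-world pose and is converted with
    # cam_pose_transformation before being returned.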
    @staticmethod
    def load_cam_info(path):
        camera_params_path = os.path.join(os.path.dirname(path), "camera_params", os.path.basename(path) + ".json")
        with open(camera_params_path, "r") as f:
            label_data = json.load(f)
        cam_to_world = np.asarray(label_data["extrinsic"])
        cam_to_world = DataLoadUtil.cam_pose_transformation(cam_to_world)
        cam_intrinsic = np.asarray(label_data["intrinsic"])
        return {
            "cam_to_world": cam_to_world,
            "cam_intrinsic": cam_intrinsic,
        }
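
    # Back-projection uses the standard pinhole model: for a pixel (u, v) with
    # depth z, the camera-space point is
    #   x = (u - cx) * z / fx,  y = (v - cy) * z / fy,
    # and the points are then lifted to world space with the cam-to-world pose.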
    @staticmethod
    def get_target_point_cloud(depth, cam_intrinsic, cam_extrinsic, mask, target_mask_label=255):
        h, w = depth.shape
        i, j = np.meshgrid(np.arange(w), np.arange(h), indexing="xy")
        z = depth
        x = (i - cam_intrinsic[0, 2]) * z / cam_intrinsic[0, 0]
        y = (j - cam_intrinsic[1, 2]) * z / cam_intrinsic[1, 1]
        points_camera = np.stack((x, y, z), axis=-1).reshape(-1, 3)
        # The segmentation mask is single-channel (see load_seg), so flatten it
        # and keep only the pixels whose label matches the target object.
        mask = mask.reshape(-1)
        target_mask = mask == target_mask_label
        target_points_camera = points_camera[target_mask]
        target_points_camera_aug = np.concatenate(
            [target_points_camera, np.ones((target_points_camera.shape[0], 1))], axis=-1
        )
        target_points_world = np.dot(cam_extrinsic, target_points_camera_aug.T).T[:, :3]
        return {
            "points_world": target_points_world,
            "points_camera": target_points_camera,
        }
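
    # Convenience wrapper: load the depth map, segmentation mask, and camera
    # parameters for one frame, and return the target object's points in
    # world coordinates.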
    @staticmethod
    def get_point_cloud_world_from_path(path):
        cam_info = DataLoadUtil.load_cam_info(path)
        depth = DataLoadUtil.load_depth(path)
        mask = DataLoadUtil.load_seg(path)
        point_cloud = DataLoadUtil.get_target_point_cloud(depth, cam_info["cam_intrinsic"], cam_info["cam_to_world"], mask)
        return point_cloud["points_world"]

    @staticmethod
    def get_point_cloud_list_from_seq(root, seq_idx, num_frames):
        point_cloud_list = []
        for idx in range(num_frames):
            path = DataLoadUtil.get_path(root, seq_idx, idx)
            point_cloud = DataLoadUtil.get_point_cloud_world_from_path(path)
            point_cloud_list.append(point_cloud)
        return point_cloud_list
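

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): the dataset root,
    # scene name, and frame index below are placeholders, and the example
    # assumes the directory layout expected by the loaders above
    # (<root>/<scene>/{depth,rgb,mask,camera_params}/<frame_idx>.*).
    root = "/path/to/dataset"   # hypothetical dataset root
    scene_name = "scene_0"      # hypothetical scene directory
    frame_path = DataLoadUtil.get_path(root, scene_name, 0)
    cam_info = DataLoadUtil.load_cam_info(frame_path)
    points_world = DataLoadUtil.get_point_cloud_world_from_path(frame_path)
    print("frame 0 target points (world):", points_world.shape)
    print("intrinsics:\n", cam_info["cam_intrinsic"])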