import os
import sys

import numpy as np
import torch

from PytorchBoot.dataset import BaseDataset
import PytorchBoot.namespace as namespace
import PytorchBoot.stereotype as stereotype
from PytorchBoot.config import ConfigManager
from PytorchBoot.utils.log_util import Log

# Make the project-local `utils` package importable (hardcoded, machine-specific path).
sys.path.append(r"/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction")

from utils.data_load import DataLoadUtil
from utils.pose import PoseUtil
from utils.pts import PtsUtil


@stereotype.dataset("seq_reconstruction_dataset")
class SeqReconstructionDataset(BaseDataset):
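    """Dataset that, for each scene, yields the first valid scanned view
    together with the combined ground-truth point cloud of the full
    sequence. Registered with PytorchBoot under the
    "seq_reconstruction_dataset" stereotype; field semantics are inferred
    from usage in this file.
    """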
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.root_dir = config["root_dir"]
        self.split_file_path = config["split_file"]
        self.scene_name_list = self.load_scene_name_list()
        self.datalist = self.get_datalist()

        self.pts_num = config["pts_num"]
        self.type = config["type"]
        self.cache = config.get("cache", False)
        self.load_from_preprocess = config.get("load_from_preprocess", False)

        if self.type == namespace.Mode.TEST:
            # self.model_dir = config["model_dir"]
            self.filter_degree = config["filter_degree"]
        if self.type == namespace.Mode.TRAIN:
            # Repeat the datalist to stretch one epoch; 1 leaves it unchanged.
            scale_ratio = 1
            self.datalist = self.datalist * scale_ratio
        if self.cache:
            expr_root = ConfigManager.get("runner", "experiment", "root_dir")
            expr_name = ConfigManager.get("runner", "experiment", "name")
            self.cache_dir = os.path.join(expr_root, expr_name, "cache")
            # self.preprocess_cache()

    def load_scene_name_list(self):
        scene_name_list = []
        with open(self.split_file_path, "r") as f:
            for line in f:
                scene_name = line.strip()
                # Skip scenes listed in the split file but missing on disk.
                if not os.path.exists(os.path.join(self.root_dir, scene_name)):
                    continue
                scene_name_list.append(scene_name)
        return scene_name_list

    def get_scene_name_list(self):
        return self.scene_name_list

    def get_datalist(self):
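        """Build one entry per scene, recording the first frame (index >= 10)
        with a non-empty preprocessed point cloud. `best_seq_len` (-1) and
        `max_coverage_rate` (1.0) are fixed placeholders here, as no label
        file is consulted.
        """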
        datalist = []
        total = len(self.scene_name_list)
        for idx, scene_name in enumerate(self.scene_name_list):
            print(f"processing {scene_name} ({idx + 1}/{total})")
            scene_max_cr_idx = 0
            frame_len = DataLoadUtil.get_scene_seq_length(self.root_dir, scene_name)

            # Find the first frame (index 10 onward, as in the original
            # logic) whose preprocessed point cloud is non-empty.
            first_frame = None
            for i in range(10, frame_len):
                path = DataLoadUtil.get_path(self.root_dir, scene_name, i)
                pts = DataLoadUtil.load_from_preprocessed_pts(path, "npy")
                if pts.shape[0] > 0:
                    first_frame = i
                    break
            if first_frame is None:
                Log.error(f"no non-empty frame found for scene {scene_name}, skipping")
                continue
            datalist.append({
                "scene_name": scene_name,
                "first_frame": first_frame,
                "best_seq_len": -1,
                "max_coverage_rate": 1.0,
                "label_idx": scene_max_cr_idx,
            })
        return datalist

    def preprocess_cache(self):
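        """Touch every item once so expensive work is done up front
        (presumably populating the cache directory when caching is enabled)."""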
Log.info("preprocessing cache...")
|
|
for item_idx in range(len(self.datalist)):
|
|
self.__getitem__(item_idx)
|
|
Log.success("finish preprocessing cache.")
|
|
|
|

    def load_from_cache(self, scene_name, curr_frame_idx):
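        """Return the cached array for (scene, frame), or None if no cache file exists."""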
cache_name = f"{scene_name}_{curr_frame_idx}.txt"
|
|
cache_path = os.path.join(self.cache_dir, cache_name)
|
|
if os.path.exists(cache_path):
|
|
data = np.loadtxt(cache_path)
|
|
return data
|
|
else:
|
|
return None
|
|
|
|

    def save_to_cache(self, scene_name, curr_frame_idx, data):
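        """Write one frame's data to the cache as text; failures are logged rather than raised."""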
cache_name = f"{scene_name}_{curr_frame_idx}.txt"
|
|
cache_path = os.path.join(self.cache_dir, cache_name)
|
|
try:
|
|
np.savetxt(cache_path, data)
|
|
except Exception as e:
|
|
Log.error(f"Save cache failed: {e}")
|
|
|
|

    def seq_combined_pts(self, scene, frame_idx_list):
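        """Stack the preprocessed point clouds of `frame_idx_list` and
        voxel-downsample the union with a 0.003 voxel size."""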
        all_combined_pts = []
        for i in frame_idx_list:
            path = DataLoadUtil.get_path(self.root_dir, scene, i)
            pts = DataLoadUtil.load_from_preprocessed_pts(path, "npy")
            if pts.shape[0] == 0:
                continue
            all_combined_pts.append(pts)
        all_combined_pts = np.vstack(all_combined_pts)
        downsampled_all_pts = PtsUtil.voxel_downsample_point_cloud(all_combined_pts, 0.003)
        return downsampled_all_pts

    def __getitem__(self, index):
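        """Build one data item: the first valid view of the scene (points
        plus 9D pose) and the ground-truth cloud combined over all frames."""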
        data_item_info = self.datalist[index]
        max_coverage_rate = data_item_info["max_coverage_rate"]
        best_seq_len = data_item_info["best_seq_len"]
        scene_name = data_item_info["scene_name"]
        scanned_views_pts = []
        scanned_n_to_world_pose = []

        # Load the first valid view of the scene.
        frame_idx = data_item_info["first_frame"]
        view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
        cam_info = DataLoadUtil.load_cam_info(view_path, binocular=True)

        n_to_world_pose = cam_info["cam_to_world"]
        target_point_cloud = DataLoadUtil.load_from_preprocessed_pts(view_path)
        downsampled_target_point_cloud = PtsUtil.random_downsample_point_cloud(
            target_point_cloud, self.pts_num
        )
        scanned_views_pts.append(downsampled_target_point_cloud)

        # Encode the rotation as a 6D representation and append the
        # translation, giving a 9D camera-to-world pose vector.
        n_to_world_6d = PoseUtil.matrix_to_rotation_6d_numpy(
            np.asarray(n_to_world_pose[:3, :3])
        )
        n_to_world_trans = n_to_world_pose[:3, 3]
        n_to_world_9d = np.concatenate([n_to_world_6d, n_to_world_trans], axis=0)
        scanned_n_to_world_pose.append(n_to_world_9d)

        # Relative pose of the center (O) camera in the left (L) camera frame.
        first_left_cam_pose = cam_info["cam_to_world"]
        first_center_cam_pose = cam_info["cam_to_world_O"]
        first_O_to_first_L_pose = np.dot(np.linalg.inv(first_left_cam_pose), first_center_cam_pose)

        # Ground truth: voxel-downsampled union of every frame in the sequence.
        frame_list = list(range(DataLoadUtil.get_scene_seq_length(self.root_dir, scene_name)))
        gt_pts = self.seq_combined_pts(scene_name, frame_list)

        data_item = {
            "first_scanned_pts": np.asarray(scanned_views_pts, dtype=np.float32),  # Ndarray(S x Nv x 3)
            "first_scanned_n_to_world_pose_9d": np.asarray(scanned_n_to_world_pose, dtype=np.float32),  # Ndarray(S x 9)
            "seq_max_coverage_rate": max_coverage_rate,  # Float, range(0, 1)
            "best_seq_len": best_seq_len,  # Int
            "scene_name": scene_name,  # String
            "gt_pts": gt_pts,  # Ndarray(N x 3)
            "scene_path": os.path.join(self.root_dir, scene_name),  # String
            "O_to_L_pose": first_O_to_first_L_pose,
        }

        return data_item

    def __len__(self):
        return len(self.datalist)


# -------------- Debug ---------------- #
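# Standalone preprocessing entry point: builds the dataset and pickles each
# item to disk. The paths below are machine-specific.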
if __name__ == "__main__":
    from tqdm import tqdm
    import pickle

    seed = 0
    torch.manual_seed(seed)
    np.random.seed(seed)

    config = {
        "root_dir": "/media/hofee/data/data/test_bottle/view",
        "source": "seq_reconstruction_dataset",
        "split_file": "/media/hofee/data/data/test_bottle/test_bottle.txt",
        "load_from_preprocess": True,
        "filter_degree": 75,
        "num_workers": 0,
        "pts_num": 8192,
        "type": namespace.Mode.TEST,
    }

    output_dir = "/media/hofee/data/data/test_bottle/preprocessed_dataset"
    os.makedirs(output_dir, exist_ok=True)

    ds = SeqReconstructionDataset(config)
    for i in tqdm(range(len(ds)), desc="processing dataset"):
        output_path = os.path.join(output_dir, f"item_{i}.pkl")
        item = ds[i]
        # Convert arrays to plain lists so the pickle does not depend on numpy.
        for key, value in item.items():
            if isinstance(value, np.ndarray):
                item[key] = value.tolist()
        # import ipdb; ipdb.set_trace()
        with open(output_path, "wb") as f:
            pickle.dump(item, f)