add seq_dataset
Parent: 0280dc7292
Commit: 9ec3a00fd4
configs/local/split_dataset_config.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
runner:
  general:
    seed: 0
    device: cpu
    cuda_visible_devices: "0,1,2,3,4,5,6,7"

  experiment:
    name: debug
    root_dir: "experiments"

  split:
    root_dir: "/media/hofee/data/project/python/nbv_reconstruction/sample_for_training/scenes"
    type: "unseen_instance" # "unseen_category"
    datasets:
      OmniObject3d_train:
        path: "/media/hofee/data/project/python/nbv_reconstruction/sample_for_training/OmniObject3d_train.txt"
        ratio: 0.9

      OmniObject3d_test:
        path: "/media/hofee/data/project/python/nbv_reconstruction/sample_for_training/OmniObject3d_test.txt"
        ratio: 0.1
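As a side note on how the split section above might be consumed, here is a minimal sketch (not part of the commit) that partitions a scene list by the configured ratios; the function name and the shuffling behavior are assumptions.

import random

def split_scenes(scene_names, ratios, seed=0):
    # ratios: e.g. {"OmniObject3d_train": 0.9, "OmniObject3d_test": 0.1}
    random.Random(seed).shuffle(scene_names)
    result, start = {}, 0
    for name, ratio in ratios.items():
        count = int(len(scene_names) * ratio)
        result[name] = scene_names[start:start + count]
        start += count
    return result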
configs/local/strategy_generate_config.yaml (new file, 32 lines)
@@ -0,0 +1,32 @@
runner:
  general:
    seed: 0
    device: cpu
    cuda_visible_devices: "0,1,2,3,4,5,6,7"

  experiment:
    name: debug
    root_dir: "experiments"

  generate:
    voxel_threshold: 0.01
    overlap_threshold: 0.5
    filter_degree: 75
    to_specified_dir: True # if True, output_dir is used, otherwise, root_dir is used
    save_points: False
    save_best_combined_points: True
    save_mesh: True
    overwrite: False
    dataset_list:
      - OmniObject3d

datasets:
  OmniObject3d:
    #"/media/hofee/data/data/temp_output"
    root_dir: "/media/hofee/repository/nbv_reconstruction_data_512"
    model_dir: "/media/hofee/data/data/scaled_object_meshes"
    #output_dir: "/media/hofee/data/data/label_output"
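For context on the thresholds above, here is a minimal sketch of voxel-based coverage and overlap checks, under the assumption that voxel_threshold acts as a voxel size and overlap_threshold as a minimum shared-voxel ratio; this is illustrative only and not the repo's ReconstructionUtil.

import numpy as np

def voxelize(points, voxel_size):
    # Map (N, 3) points to the set of occupied voxel indices at this resolution.
    return set(map(tuple, np.floor(points / voxel_size).astype(int)))

def coverage_rate(scanned_pts, target_pts, voxel_size=0.01):
    scanned, target = voxelize(scanned_pts, voxel_size), voxelize(target_pts, voxel_size)
    return len(scanned & target) / max(len(target), 1)

def enough_overlap(new_pts, combined_pts, voxel_size=0.01, overlap_threshold=0.5):
    new_v, combined_v = voxelize(new_pts, voxel_size), voxelize(combined_pts, voxel_size)
    return len(new_v & combined_v) / max(len(new_v), 1) >= overlap_threshold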
configs/local/train_config.yaml (new file, 103 lines)
@@ -0,0 +1,103 @@
runner:
  general:
    seed: 1
    device: cuda
    cuda_visible_devices: "0,1,2,3,4,5,6,7"
    parallel: False

  experiment:
    name: local_eval
    root_dir: "experiments"
    use_checkpoint: True
    epoch: 600 # -1 stands for last epoch
    max_epochs: 5000
    save_checkpoint_interval: 1
    test_first: True

  train:
    optimizer:
      type: Adam
      lr: 0.0001
    losses:
      - gf_loss
    dataset: OmniObject3d_train
  test:
    frequency: 3 # test frequency
    dataset_list:
      - OmniObject3d_test

  pipeline: nbv_reconstruction_pipeline

dataset:
  OmniObject3d_train:
    root_dir: "/media/hofee/data/project/python/nbv_reconstruction/sample_for_training/scenes"
    model_dir: "/media/hofee/data/data/scaled_object_meshes"
    source: nbv_reconstruction_dataset
    split_file: "/media/hofee/data/project/python/nbv_reconstruction/sample_for_training/OmniObject3d_train.txt"
    type: train
    ratio: 1
    batch_size: 1
    num_workers: 12
    pts_num: 4096

  OmniObject3d_test:
    root_dir: "/media/hofee/data/project/python/nbv_reconstruction/sample_for_training/scenes"
    model_dir: "/media/hofee/data/data/scaled_object_meshes"
    source: nbv_reconstruction_dataset
    split_file: "/media/hofee/data/project/python/nbv_reconstruction/sample_for_training/OmniObject3d_train.txt"
    type: test
    filter_degree: 75
    eval_list:
      - pose_diff
      - coverage_rate_increase
    ratio: 0.1
    batch_size: 1
    num_workers: 12
    pts_num: 4096

pipeline:
  nbv_reconstruction_pipeline:
    pts_encoder: pointnet_encoder
    seq_encoder: transformer_seq_encoder
    pose_encoder: pose_encoder
    view_finder: gf_view_finder

module:
  pointnet_encoder:
    in_dim: 3
    out_dim: 1024
    global_feat: True
    feature_transform: False

  transformer_seq_encoder:
    pts_embed_dim: 1024
    pose_embed_dim: 256
    num_heads: 4
    ffn_dim: 256
    num_layers: 3
    output_dim: 2048

  gf_view_finder:
    t_feat_dim: 128
    pose_feat_dim: 256
    main_feat_dim: 2048
    regression_head: Rx_Ry_and_T
    pose_mode: rot_matrix
    per_point_feature: False
    sample_mode: ode
    sampling_steps: 500
    sde_mode: ve

  pose_encoder:
    pose_dim: 9
    out_dim: 256

loss_function:
  gf_loss:

evaluation_method:
  pose_diff:
  coverage_rate_increase:
    renderer_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py"
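The pose_dim: 9 and pose_mode: rot_matrix entries above line up with the 9D pose built in core/seq_dataset.py (a 6D rotation encoding plus translation). Here is a minimal numpy sketch of that representation, assuming the 6D encoding takes the first two rows of the rotation matrix; the exact row/column convention lives in PoseUtil and is not shown in this diff.

import numpy as np

def matrix_to_9d(cam_to_world):
    # 4x4 homogeneous pose -> 6D rotation + 3D translation (shape (9,)).
    R, t = cam_to_world[:3, :3], cam_to_world[:3, 3]
    rot_6d = R[:2, :].reshape(-1)       # assumed row convention
    return np.concatenate([rot_6d, t])

def rot_6d_to_matrix(rot_6d):
    # Gram-Schmidt recovery of a full rotation matrix from the 6D encoding.
    a1, a2 = rot_6d[:3], rot_6d[3:]
    b1 = a1 / np.linalg.norm(a1)
    b2 = a2 - np.dot(b1, a2) * b1
    b2 = b2 / np.linalg.norm(b2)
    b3 = np.cross(b1, b2)
    return np.stack([b1, b2, b3], axis=0)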
configs/local/view_generate_config.yaml (new file, 53 lines)
@@ -0,0 +1,53 @@
runner:
  general:
    seed: 0
    device: cpu
    cuda_visible_devices: 0,1,2,3,4,5,6,7
  experiment:
    name: debug
    root_dir: experiments
  generate:
    object_dir: /media/hofee/data/data/scaled_object_meshes
    table_model_path: /media/hofee/data/data/others/table.obj
    output_dir: /media/hofee/repository/nbv_reconstruction_data_512
    binocular_vision: true
    plane_size: 10
    max_views: 512
    min_views: 64
    max_diag: 0.7
    min_diag: 0.1
    random_config:
      display_table:
        min_height: 0.05
        max_height: 0.15
        min_radius: 0.3
        max_radius: 0.5
        min_R: 0.05
        max_R: 0.3
        min_G: 0.05
        max_G: 0.3
        min_B: 0.05
        max_B: 0.3
      display_object:
        min_x: 0
        max_x: 0.03
        min_y: 0
        max_y: 0.03
        min_z: 0.01
        max_z: 0.01
        random_rotation_ratio: 0.3
      random_objects:
        num: 4
        cluster: 0.9
    light_and_camera_config:
      Camera:
        near_plane: 0.01
        far_plane: 5
        fov_vertical: 25
        resolution: [1280,800]
        eye_distance: 0.15
        eye_angle: 25
      Light:
        location: [0,0,3.5]
        orientation: [0,0,0]
        power: 150
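A minimal sketch (not part of the commit) of how the Camera block above could map to a pinhole intrinsic matrix; square pixels are assumed, and the repo's actual Blender setup may differ.

import numpy as np

def intrinsics_from_fov(fov_vertical_deg, resolution):
    # resolution is [width, height]; fov_vertical is in degrees.
    width, height = resolution
    fy = (height / 2.0) / np.tan(np.radians(fov_vertical_deg) / 2.0)
    fx = fy  # square pixels assumed
    return np.array([[fx, 0.0, width / 2.0],
                     [0.0, fy, height / 2.0],
                     [0.0, 0.0, 1.0]])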
@@ -145,28 +145,28 @@ class NBVReconstructionDataset(BaseDataset):
            "max_coverage_rate": max_coverage_rate,
            "scene_name": scene_name
        }
        # if self.type == namespace.Mode.TEST:
        #     diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
        #     voxel_threshold = diag*0.02
        #     model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)
        #     pts_list = []
        #     for view in scanned_views:
        #         frame_idx = view[0]
        #         view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
        #         point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(view_path, binocular=True)
        #         cam_params = DataLoadUtil.load_cam_info(view_path, binocular=True)
        #         sampled_point_cloud = ReconstructionUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=self.filter_degree)
        #         pts_list.append(sampled_point_cloud)
        #     nL_to_world_pose = cam_params["cam_to_world"]
        #     nO_to_world_pose = cam_params["cam_to_world_O"]
        #     nO_to_nL_pose = np.dot(np.linalg.inv(nL_to_world_pose), nO_to_world_pose)
        #     data_item["scanned_target_pts_list"] = pts_list
        #     data_item["model_points_normals"] = model_points_normals
        #     data_item["voxel_threshold"] = voxel_threshold
        #     data_item["filter_degree"] = self.filter_degree
        #     data_item["scene_path"] = os.path.join(self.root_dir, scene_name)
        #     data_item["first_frame_to_world"] = np.asarray(first_frame_to_world, dtype=np.float32)
        #     data_item["nO_to_nL_pose"] = np.asarray(nO_to_nL_pose, dtype=np.float32)
        if self.type == namespace.Mode.TEST:
            diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
            voxel_threshold = diag*0.02
            model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)
            pts_list = []
            for view in scanned_views:
                frame_idx = view[0]
                view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
                point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(view_path, binocular=True)
                cam_params = DataLoadUtil.load_cam_info(view_path, binocular=True)
                sampled_point_cloud = ReconstructionUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=self.filter_degree)
                pts_list.append(sampled_point_cloud)
            nL_to_world_pose = cam_params["cam_to_world"]
            nO_to_world_pose = cam_params["cam_to_world_O"]
            nO_to_nL_pose = np.dot(np.linalg.inv(nL_to_world_pose), nO_to_world_pose)
            data_item["scanned_target_pts_list"] = pts_list
            data_item["model_points_normals"] = model_points_normals
            data_item["voxel_threshold"] = voxel_threshold
            data_item["filter_degree"] = self.filter_degree
            data_item["scene_path"] = os.path.join(self.root_dir, scene_name)
            data_item["first_frame_to_world"] = np.asarray(first_frame_to_world, dtype=np.float32)
            data_item["nO_to_nL_pose"] = np.asarray(nO_to_nL_pose, dtype=np.float32)
        return data_item

    def __len__(self):
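A short aside on the nO_to_nL_pose computation in the hunk above: it chains homogeneous camera-to-world transforms, T_nO->nL = (T_nL->world)^-1 @ T_nO->world. A minimal sketch, assuming "nL" and "nO" denote the left and center cameras of the binocular rig:

import numpy as np

def relative_pose(cam_to_world_src, cam_to_world_dst):
    # Transform that maps points from the src camera frame into the dst camera frame.
    return np.linalg.inv(cam_to_world_dst) @ cam_to_world_src

# e.g. nO_to_nL_pose = relative_pose(cam_to_world_O, cam_to_world_L)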
@@ -186,6 +186,8 @@ class NBVReconstructionDataset(BaseDataset):
            return collate_data
        return collate_fn


# -------------- Debug ---------------- #
if __name__ == "__main__":
    import torch
    seed = 0
core/seq_dataset.py (new file, 143 lines)
@@ -0,0 +1,143 @@
import numpy as np
from PytorchBoot.dataset import BaseDataset
import PytorchBoot.namespace as namespace
import PytorchBoot.stereotype as stereotype
from PytorchBoot.utils.log_util import Log
import torch
import os
import sys
sys.path.append(r"/media/hofee/data/project/python/nbv_reconstruction/nbv_reconstruction")

from utils.data_load import DataLoadUtil
from utils.pose import PoseUtil
from utils.pts import PtsUtil

@stereotype.dataset("seq_nbv_reconstruction_dataset")
class SeqNBVReconstructionDataset(BaseDataset):
    def __init__(self, config):
        super(SeqNBVReconstructionDataset, self).__init__(config)
        self.type = config["type"]
        if self.type != namespace.Mode.TEST:
            Log.error("Dataset <seq_nbv_reconstruction_dataset> Only support test mode", terminate=True)
        self.config = config
        self.root_dir = config["root_dir"]
        self.split_file_path = config["split_file"]
        self.scene_name_list = self.load_scene_name_list()
        self.datalist = self.get_datalist()
        self.pts_num = config["pts_num"]

        self.model_dir = config["model_dir"]
        self.filter_degree = config["filter_degree"]

    def load_scene_name_list(self):
        scene_name_list = []
        with open(self.split_file_path, "r") as f:
            for line in f:
                scene_name = line.strip()
                scene_name_list.append(scene_name)
        return scene_name_list

    def get_datalist(self):
        datalist = []
        for scene_name in self.scene_name_list:
            label_path = DataLoadUtil.get_label_path(self.root_dir, scene_name)
            label_data = DataLoadUtil.load_label(label_path)
            best_seq = label_data["best_sequence"]
            max_coverage_rate = label_data["max_coverage_rate"]
            first_frame = best_seq[0]
            datalist.append({
                "scene_name": scene_name,
                "first_frame": first_frame,
                "max_coverage_rate": max_coverage_rate
            })
        return datalist

    def __getitem__(self, index):
        data_item_info = self.datalist[index]
        first_frame_idx = data_item_info["first_frame"][0]
        first_frame_coverage = data_item_info["first_frame"][1]
        max_coverage_rate = data_item_info["max_coverage_rate"]
        scene_name = data_item_info["scene_name"]
        first_cam_info = DataLoadUtil.load_cam_info(DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx), binocular=True)
        first_frame_to_world = first_cam_info["cam_to_world"]

        first_view_path = DataLoadUtil.get_path(self.root_dir, scene_name, first_frame_idx)
        first_left_cam_pose = first_cam_info["cam_to_world"]
        first_right_cam_pose = first_cam_info["cam_to_world_R"]
        first_center_cam_pose = first_cam_info["cam_to_world_O"]

        first_depth_L, first_depth_R = DataLoadUtil.load_depth(first_view_path, first_cam_info['near_plane'], first_cam_info['far_plane'], binocular=True)
        first_L_to_L_pose = np.eye(4)
        first_R_to_L_pose = np.dot(np.linalg.inv(first_left_cam_pose), first_right_cam_pose)
        first_point_cloud_L = DataLoadUtil.get_point_cloud(first_depth_L, first_cam_info['cam_intrinsic'], first_L_to_L_pose)['points_world']
        first_point_cloud_R = DataLoadUtil.get_point_cloud(first_depth_R, first_cam_info['cam_intrinsic'], first_R_to_L_pose)['points_world']

        first_point_cloud_L = PtsUtil.random_downsample_point_cloud(first_point_cloud_L, 65536)
        first_point_cloud_R = PtsUtil.random_downsample_point_cloud(first_point_cloud_R, 65536)
        first_overlap_points = DataLoadUtil.get_overlapping_points(first_point_cloud_L, first_point_cloud_R)
        first_downsampled_target_point_cloud = PtsUtil.random_downsample_point_cloud(first_overlap_points, self.pts_num)
        first_to_first_pose = np.eye(4)
        first_to_first_rot_6d = PoseUtil.matrix_to_rotation_6d_numpy(np.asarray(first_to_first_pose[:3,:3]))
        first_to_first_trans = first_to_first_pose[:3,3]
        first_to_first_9d = np.concatenate([first_to_first_rot_6d, first_to_first_trans], axis=0)
        diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
        voxel_threshold = diag*0.02
        first_O_to_first_L_pose = np.dot(np.linalg.inv(first_left_cam_pose), first_center_cam_pose)
        scene_path = os.path.join(self.root_dir, scene_name)
        data_item = {
            "first_pts": np.asarray([first_downsampled_target_point_cloud], dtype=np.float32),
            "first_to_first_9d": np.asarray([first_to_first_9d], dtype=np.float32),
            "scene_name": scene_name,
            "max_coverage_rate": max_coverage_rate,
            "voxel_threshold": voxel_threshold,
            "filter_degree": self.filter_degree,
            "first_frame_to_world": first_frame_to_world,
            "first_O_to_first_L_pose": first_O_to_first_L_pose,
            "first_frame_coverage": first_frame_coverage,
            "scene_path": scene_path
        }
        return data_item

    def __len__(self):
        return len(self.datalist)

    def get_collate_fn(self):
        def collate_fn(batch):
            collate_data = {}
            collate_data["first_pts"] = [torch.tensor(item['first_pts']) for item in batch]
            collate_data["first_to_first_9d"] = [torch.tensor(item['first_to_first_9d']) for item in batch]
            collate_data["first_frame_to_world"] = torch.stack([torch.tensor(item["first_frame_to_world"]) for item in batch])
            for key in batch[0].keys():
                if key not in ["first_pts", "first_to_first_9d", "first_frame_to_world"]:
                    collate_data[key] = [item[key] for item in batch]
            return collate_data
        return collate_fn

# -------------- Debug ---------------- #
if __name__ == "__main__":
    import torch
    seed = 0
    torch.manual_seed(seed)
    np.random.seed(seed)
    config = {
        "root_dir": "/media/hofee/data/project/python/nbv_reconstruction/sample_for_training/scenes",
        "split_file": "/media/hofee/data/project/python/nbv_reconstruction/sample_for_training/OmniObject3d_train.txt",
        "model_dir": "/media/hofee/data/data/scaled_object_meshes",
        "ratio": 0.5,
        "batch_size": 2,
        "filter_degree": 75,
        "num_workers": 0,
        "pts_num": 32684,
        "type": namespace.Mode.TEST,
    }
    ds = SeqNBVReconstructionDataset(config)
    print(len(ds))
    #ds.__getitem__(10)
    dl = ds.get_loader(shuffle=True)
    for idx, data in enumerate(dl):
        data = ds.process_batch(data, "cuda:0")
        print(data)
        # ------ Debug Start ------
        import ipdb;ipdb.set_trace()
        # ------ Debug End ------
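The dataset above relies on DataLoadUtil.get_overlapping_points to fuse the left/right clouds, but that utility is not part of this diff. A common voxel-hash approach would look roughly like the sketch below; the voxel size is an assumed parameter, not a value from the repo.

import numpy as np

def get_overlapping_points(points_L, points_R, voxel_size=0.005):
    # Keep the left-camera points whose voxel cell is also occupied by the right cloud.
    keys_L = np.floor(points_L / voxel_size).astype(np.int64)
    keys_R = np.floor(points_R / voxel_size).astype(np.int64)
    occupied_R = set(map(tuple, keys_R))
    mask = np.array([tuple(k) in occupied_R for k in keys_L])
    return points_L[mask]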
runners/inferencer.py (new file, 188 lines)
@@ -0,0 +1,188 @@
import os
import json
from datetime import datetime

import torch
from tqdm import tqdm

from PytorchBoot.config import ConfigManager
import PytorchBoot.namespace as namespace
import PytorchBoot.stereotype as stereotype
from PytorchBoot.factory import ComponentFactory
from PytorchBoot.factory import OptimizerFactory

from PytorchBoot.dataset import BaseDataset
from PytorchBoot.runners.runner import Runner
from PytorchBoot.stereotype import EXTERNAL_FRONZEN_MODULES
from PytorchBoot.utils import Log
from PytorchBoot.status import status_manager

@stereotype.runner("nbv_evaluator")
class NextBestViewEvaluator(Runner):
    def __init__(self, config_path):
        super().__init__(config_path)

        ''' Pipeline '''
        self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
        self.parallel = self.config["general"]["parallel"]
        self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
        if self.parallel and self.device == "cuda":
            self.pipeline = torch.nn.DataParallel(self.pipeline)
        self.pipeline = self.pipeline.to(self.device)

        ''' Experiment '''
        self.load_experiment("nbv_evaluator")

        ''' Test '''
        self.test_config = ConfigManager.get(namespace.Stereotype.RUNNER, namespace.Mode.TEST)
        self.test_dataset_name_list = self.test_config["dataset_list"]
        self.test_set_list = []
        self.test_writer_list = []
        seen_name = set()
        for test_dataset_name in self.test_dataset_name_list:
            if test_dataset_name not in seen_name:
                seen_name.add(test_dataset_name)
            else:
                raise ValueError("Duplicate test dataset name: {}".format(test_dataset_name))
            test_set: BaseDataset = ComponentFactory.create(namespace.Stereotype.DATASET, test_dataset_name)
            self.test_set_list.append(test_set)

        self.print_info()

    def run(self):
        Log.info("Loading from epoch {}.".format(self.current_epoch))
        self.test()

    def test(self):
        self.pipeline.eval()
        with torch.no_grad():
            test_set: BaseDataset
            for dataset_idx, test_set in enumerate(self.test_set_list):
                test_set_config = test_set.get_config()
                eval_list = test_set_config["eval_list"]
                ratio = test_set_config["ratio"]
                test_set_name = test_set.get_name()

                output_list = []
                data_list = []
                test_loader = test_set.get_loader()
                total = int(len(test_loader))
                loop = tqdm(enumerate(test_loader), total=total)
                for i, data in loop:
                    status_manager.set_progress("train", "default_trainer", f"(test) Batch[{test_set_name}]", i+1, total)
                    test_set.process_batch(data, self.device)
                    data["mode"] = namespace.Mode.TEST
                    output = self.pipeline(data)
                    output_list.append(output)
                    data_list.append(data)
                    loop.set_description(
                        f'Epoch [{self.current_epoch}/{self.max_epochs}] (Test: {test_set_name}, ratio={ratio})')
                result_dict = self.eval_fn(output_list, data_list, eval_list)

    @staticmethod
    def eval_fn(output_list, data_list, eval_list):
        collected_result = {}
        for eval_method_name in eval_list:
            eval_method = ComponentFactory.create(namespace.Stereotype.EVALUATION_METHOD, eval_method_name)
            eval_results:dict = eval_method.evaluate(output_list, data_list)
            for data_type, eval_result in eval_results.items():
                if data_type not in collected_result:
                    collected_result[data_type] = {}
                for name, value in eval_result.items():
                    collected_result[data_type][name] = value
                    status_manager.set_status("train", "default_trainer", f"[eval]{name}", value)

        return collected_result

    def get_checkpoint_path(self, is_last=False):
        return os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME,
                            "Epoch_{}.pth".format(
                                self.current_epoch if self.current_epoch != -1 and not is_last else "last"))

    def load_checkpoint(self, is_last=False):
        self.load(self.get_checkpoint_path(is_last))
        Log.success(f"Loaded checkpoint from {self.get_checkpoint_path(is_last)}")
        if is_last:
            checkpoint_root = os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME)
            meta_path = os.path.join(checkpoint_root, "meta.json")
            if not os.path.exists(meta_path):
                raise FileNotFoundError(
                    "No checkpoint meta.json file in the experiment {}".format(self.experiments_config["name"]))
            file_path = os.path.join(checkpoint_root, "meta.json")
            with open(file_path, "r") as f:
                meta = json.load(f)
            self.current_epoch = meta["last_epoch"]
            self.current_iter = meta["last_iter"]

    def save_checkpoint(self, is_last=False):
        self.save(self.get_checkpoint_path(is_last))
        if not is_last:
            Log.success(f"Checkpoint at epoch {self.current_epoch} saved to {self.get_checkpoint_path(is_last)}")
        else:
            meta = {
                "last_epoch": self.current_epoch,
                "last_iter": self.current_iter,
                "time": str(datetime.now())
            }
            checkpoint_root = os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME)
            file_path = os.path.join(checkpoint_root, "meta.json")
            with open(file_path, "w") as f:
                json.dump(meta, f)

    def load_experiment(self, backup_name=None):
        super().load_experiment(backup_name)
        if self.experiments_config["use_checkpoint"]:
            self.current_epoch = self.experiments_config["epoch"]
            self.load_checkpoint(is_last=(self.current_epoch == -1))

    def create_experiment(self, backup_name=None):
        super().create_experiment(backup_name)
        ckpt_dir = os.path.join(str(self.experiment_path), namespace.Direcotry.CHECKPOINT_DIR_NAME)
        os.makedirs(ckpt_dir)
        tensorboard_dir = os.path.join(str(self.experiment_path), namespace.Direcotry.TENSORBOARD_DIR_NAME)
        os.makedirs(tensorboard_dir)

    def load(self, path):
        state_dict = torch.load(path)
        if self.parallel:
            self.pipeline.module.load_state_dict(state_dict)
        else:
            self.pipeline.load_state_dict(state_dict)

    def save(self, path):
        if self.parallel:
            state_dict = self.pipeline.module.state_dict()
        else:
            state_dict = self.pipeline.state_dict()

        for name, module in self.pipeline.named_modules():
            if module.__class__ in EXTERNAL_FRONZEN_MODULES:
                if name in state_dict:
                    del state_dict[name]

        torch.save(state_dict, path)

    def print_info(self):
        def print_dataset(dataset: BaseDataset):
            config = dataset.get_config()
            name = dataset.get_name()
            Log.blue(f"Dataset: {name}")
            for k,v in config.items():
                Log.blue(f"\t{k}: {v}")

        super().print_info()
        table_size = 70
        Log.blue(f"{'+' + '-' * (table_size // 2)} Pipeline {'-' * (table_size // 2)}" + '+')
        Log.blue(self.pipeline)
        Log.blue(f"{'+' + '-' * (table_size // 2)} Datasets {'-' * (table_size // 2)}" + '+')
        Log.blue("train dataset: ")
        print_dataset(self.train_set)
        for i, test_set in enumerate(self.test_set_list):
            Log.blue(f"test dataset {i}: ")
            print_dataset(test_set)

        Log.blue(f"{'+' + '-' * (table_size // 2)}----------{'-' * (table_size // 2)}" + '+')
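eval_fn above expects each evaluation method to expose evaluate(output_list, data_list) and to return a {data_type: {metric_name: value}} mapping. A minimal stand-in illustrating that contract is sketched below; the pred_pose_9d/gt_pose_9d keys are assumptions, and registration under the evaluation_method stereotype is omitted because the exact PytorchBoot decorator is not shown in this diff.

import numpy as np

class DummyPoseDiff:
    # Stand-in, not the repo's pose_diff; shows the interface eval_fn relies on.
    def evaluate(self, output_list, data_list):
        errors = []
        for output, data in zip(output_list, data_list):
            pred = np.asarray(output["pred_pose_9d"])  # assumed key
            gt = np.asarray(data["gt_pose_9d"])        # assumed key
            errors.append(float(np.linalg.norm(pred - gt)))
        return {"pose": {"mean_l2_pose_error": float(np.mean(errors))}}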
@@ -53,7 +53,10 @@ class StrategyGenerator(Runner):
                Log.info(f"Scene <{scene_name}> Already Exists, Skip")
                cnt += 1
                continue
            self.generate_sequence(root_dir, model_dir, scene_name,voxel_threshold, overlap_threshold)
            try:
                self.generate_sequence(root_dir, model_dir, scene_name,voxel_threshold, overlap_threshold)
            except Exception as e:
                Log.error(f"Scene <{scene_name}> Failed, Error: {e}")
            cnt += 1
        status_manager.set_progress("generate", "strategy_generator", "scene", total, total)
        status_manager.set_progress("generate", "strategy_generator", "dataset", len(dataset_name_list), len(dataset_name_list))
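The hunk above replaces the bare generate_sequence call with a try/except so that one failing scene no longer aborts the whole generation run. A small, generic sketch of that guard pattern, with illustrative names that are not from the repo:

import traceback

def run_scene_safely(fn, *args, **kwargs):
    # Mirrors the guard added around generate_sequence: report the failure
    # and let the caller continue with the next scene.
    try:
        fn(*args, **kwargs)
        return True, ""
    except Exception:
        return False, traceback.format_exc()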