recover tool script

parent b5703cdc0e
commit caab57e998

data_load.py · 90 changed lines
@@ -39,10 +39,34 @@ class DataLoadUtil:
         np.savetxt(model_path, model_points)

     @staticmethod
-    def load_original_model_points(model_dir, object_name):
+    def load_mesh_at(model_dir, object_name, world_object_pose):
         model_path = os.path.join(model_dir, object_name, "mesh.obj")
         mesh = trimesh.load(model_path)
-        return mesh.vertices
+        mesh.apply_transform(world_object_pose)
+        return mesh
+
+    @staticmethod
+    def save_mesh_at(model_dir, output_dir, object_name, scene_name, world_object_pose):
+        mesh = DataLoadUtil.load_mesh_at(model_dir, object_name, world_object_pose)
+        model_path = os.path.join(output_dir, scene_name, "world_mesh.obj")
+        mesh.export(model_path)
+
+    @staticmethod
+    def save_target_mesh_at_world_space(root, model_dir, scene_name):
+        scene_info = DataLoadUtil.load_scene_info(root, scene_name)
+        target_name = scene_info["target_name"]
+        transformation = scene_info[target_name]
+        location = transformation["location"]
+        rotation_euler = transformation["rotation_euler"]
+        pose_mat = trimesh.transformations.euler_matrix(*rotation_euler)
+        pose_mat[:3, 3] = location
+
+        mesh = DataLoadUtil.load_mesh_at(model_dir, target_name, pose_mat)
+        mesh_dir = os.path.join(root, scene_name, "mesh")
+        if not os.path.exists(mesh_dir):
+            os.makedirs(mesh_dir)
+        model_path = os.path.join(mesh_dir, "world_target_mesh.obj")
+        mesh.export(model_path)

     @staticmethod
     def load_scene_info(root, scene_name):
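The new save_target_mesh_at_world_space composes the object's world pose from the scene's euler rotation and location, then bakes it into the mesh via load_mesh_at. A minimal standalone sketch of that composition (the box primitive and the example rotation/location values are illustrative stand-ins, not values from this commit):

import numpy as np
import trimesh

rotation_euler = (0.0, 0.0, np.pi / 2)   # hypothetical rotation_euler from scene_info
location = (0.1, 0.0, 0.05)              # hypothetical location from scene_info

# euler_matrix returns a 4x4 homogeneous matrix with zero translation;
# writing the location into the last column completes the object-to-world pose.
pose_mat = trimesh.transformations.euler_matrix(*rotation_euler)
pose_mat[:3, 3] = location

mesh = trimesh.creation.box(extents=(0.1, 0.1, 0.1))  # stand-in for the loaded mesh.obj
mesh.apply_transform(pose_mat)                         # vertices are now in world space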
@@ -63,7 +87,7 @@ class DataLoadUtil:
         return pose_mat

     @staticmethod
-    def load_depth(path, min_depth=0.01,max_depth=5.0,binocular=True):
+    def load_depth(path, min_depth=0.01,max_depth=5.0,binocular=False):

         def load_depth_from_real_path(real_path, min_depth, max_depth):
             depth = cv2.imread(real_path, cv2.IMREAD_UNCHANGED)
@@ -85,7 +109,7 @@ class DataLoadUtil:
         return depth_meters

     @staticmethod
-    def load_seg(path, binocular=True):
+    def load_seg(path, binocular=False):
         if binocular:
             def clean_mask(mask_image):
                 green = [0, 255, 0, 255]
@@ -94,7 +118,6 @@ class DataLoadUtil:
                 mask_image = np.where(np.abs(mask_image - green) <= threshold, green, mask_image)
                 mask_image = np.where(np.abs(mask_image - red) <= threshold, red, mask_image)
                 return mask_image
-
             mask_path_L = os.path.join(os.path.dirname(path), "mask", os.path.basename(path) + "_L.png")
             mask_image_L = clean_mask(cv2.imread(mask_path_L, cv2.IMREAD_UNCHANGED))
             mask_path_R = os.path.join(os.path.dirname(path), "mask", os.path.basename(path) + "_R.png")
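clean_mask snaps near-green and near-red pixels in the rendered segmentation back to the exact label colors with a per-channel tolerance. A small self-contained sketch of the same np.where pattern (the threshold value and the toy pixel are assumptions, not taken from the commit):

import numpy as np

green = np.array([0, 255, 0, 255])
threshold = 10                                                # assumed tolerance
mask_image = np.array([[[3, 250, 2, 255]]], dtype=np.int32)   # toy 1x1 RGBA pixel; signed dtype avoids uint8 wraparound
# Any channel within `threshold` of the reference color is snapped to it.
cleaned = np.where(np.abs(mask_image - green) <= threshold, green, mask_image)
print(cleaned)   # [[[  0 255   0 255]]]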
@@ -102,7 +125,7 @@ class DataLoadUtil:
             return mask_image_L, mask_image_R
         else:
             mask_path = os.path.join(os.path.dirname(path), "mask", os.path.basename(path) + ".png")
-            mask_image = cv2.imread(mask_path)
+            mask_image = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
             return mask_image

     @staticmethod
@@ -117,8 +140,6 @@ class DataLoadUtil:
         rgb_image = cv2.imread(rgb_path, cv2.IMREAD_COLOR)
         return rgb_image

-
-
     @staticmethod
     def cam_pose_transformation(cam_pose_before):
         offset = np.asarray([
@@ -173,38 +194,45 @@ class DataLoadUtil:
         }

     @staticmethod
-    def get_point_cloud_world_from_path(path, binocular=True):
+    def get_target_point_cloud_world_from_path(path, binocular=False, random_downsample_N=65536, voxel_size = 0.005, target_mask_label=(0,255,0,255)):
         cam_info = DataLoadUtil.load_cam_info(path, binocular=binocular)
         if binocular:
-            voxel_size = 0.005

             depth_L, depth_R = DataLoadUtil.load_depth(path, cam_info['near_plane'], cam_info['far_plane'], binocular=True)
             mask_L, mask_R = DataLoadUtil.load_seg(path, binocular=True)

-            point_cloud_L = DataLoadUtil.get_target_point_cloud(depth_L, cam_info['cam_intrinsic'], cam_info['cam_to_world'], mask_L)['points_world']
-            point_cloud_R = DataLoadUtil.get_target_point_cloud(depth_R, cam_info['cam_intrinsic'], cam_info['cam_to_world_R'], mask_R)['points_world']
-            point_cloud_L = PtsUtil.random_downsample_point_cloud(point_cloud_L, 16384)
-            point_cloud_R = PtsUtil.random_downsample_point_cloud(point_cloud_R, 16384)
-
-            voxel_indices_L = np.floor(point_cloud_L / voxel_size).astype(np.int32)
-            voxel_indices_R = np.floor(point_cloud_R / voxel_size).astype(np.int32)
-            voxels_L = set(map(tuple, voxel_indices_L))
-            voxels_R = set(map(tuple, voxel_indices_R))
-            overlap_voxels = voxels_L.intersection(voxels_R)
-            overlap_points = point_cloud_L[np.array([tuple(v) in overlap_voxels for v in voxel_indices_L])]
+            point_cloud_L = DataLoadUtil.get_target_point_cloud(depth_L, cam_info['cam_intrinsic'], cam_info['cam_to_world'], mask_L, target_mask_label)['points_world']
+            point_cloud_R = DataLoadUtil.get_target_point_cloud(depth_R, cam_info['cam_intrinsic'], cam_info['cam_to_world_R'], mask_R, target_mask_label)['points_world']
+            point_cloud_L = PtsUtil.random_downsample_point_cloud(point_cloud_L, random_downsample_N)
+            point_cloud_R = PtsUtil.random_downsample_point_cloud(point_cloud_R, random_downsample_N)
+            overlap_points = DataLoadUtil.get_overlapping_points(point_cloud_L, point_cloud_R, voxel_size)
             return overlap_points
         else:
             depth = DataLoadUtil.load_depth(path, cam_info['near_plane'], cam_info['far_plane'])
             mask = DataLoadUtil.load_seg(path)
             point_cloud = DataLoadUtil.get_target_point_cloud(depth, cam_info['cam_intrinsic'], cam_info['cam_to_world'], mask)['points_world']
             return point_cloud


     @staticmethod
     def get_point_cloud_list_from_seq(root, scene_name, num_frames, binocular=False):
         point_cloud_list = []
         for frame_idx in range(num_frames):
             path = DataLoadUtil.get_path(root, scene_name, frame_idx)
             point_cloud = DataLoadUtil.get_point_cloud_world_from_path(path, binocular)
             point_cloud_list.append(point_cloud)
         return point_cloud_list

+    def voxelize_points(points, voxel_size):
+        voxel_indices = np.floor(points / voxel_size).astype(np.int32)
+        unique_voxels = np.unique(voxel_indices, axis=0, return_inverse=True)
+        return unique_voxels
+
+    @staticmethod
+    def get_overlapping_points(point_cloud_L, point_cloud_R, voxel_size=0.005):
+        voxels_L, indices_L = DataLoadUtil.voxelize_points(point_cloud_L, voxel_size)
+        voxels_R, _ = DataLoadUtil.voxelize_points(point_cloud_R, voxel_size)
+
+        voxel_indices_L = voxels_L.view([('', voxels_L.dtype)]*3)
+        voxel_indices_R = voxels_R.view([('', voxels_R.dtype)]*3)
+        overlapping_voxels = np.intersect1d(voxel_indices_L, voxel_indices_R)
+        mask_L = np.isin(indices_L, np.where(np.isin(voxel_indices_L, overlapping_voxels))[0])
+        overlapping_points = point_cloud_L[mask_L]
+        return overlapping_points
+
     @staticmethod
     def load_points_normals(root, scene_name):
         points_path = os.path.join(root, scene_name, "points_and_normals.txt")
         points_normals = np.loadtxt(points_path)
         return points_normals
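The new get_overlapping_points replaces the old Python-set intersection with a vectorized version: both clouds are quantized to a voxel grid, each integer (x, y, z) voxel row is viewed as a single structured record so NumPy can intersect whole voxels, and the left-cloud points whose voxels also occur on the right are kept. A standalone sketch of that idea (random clouds stand in for the stereo pair; the voxel size is arbitrary):

import numpy as np

def overlapping_points(pts_l, pts_r, voxel_size=0.005):
    # Quantize both clouds to integer voxel coordinates.
    vox_l = np.floor(pts_l / voxel_size).astype(np.int32)
    vox_r = np.floor(pts_r / voxel_size).astype(np.int32)
    # View each (x, y, z) row as one structured record so isin/intersect1d
    # compare whole voxels rather than individual coordinates.
    rec_l = vox_l.view([('', vox_l.dtype)] * 3).reshape(-1)
    rec_r = vox_r.view([('', vox_r.dtype)] * 3).reshape(-1)
    keep = np.isin(rec_l, rec_r)   # True where the left voxel also occurs on the right
    return pts_l[keep]

rng = np.random.default_rng(0)
left = rng.uniform(0.00, 0.05, size=(1000, 3))
right = rng.uniform(0.02, 0.07, size=(1000, 3))
print(overlapping_points(left, right).shape)   # only points in the shared region survive

Unlike the committed version, which deduplicates voxels through voxelize_points and maps the overlap back through the inverse indices, this sketch tests membership directly; the structured-view trick is the same.

The next hunk belongs to the second file changed by this commit, shown below.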
@@ -1,19 +1,52 @@
 import numpy as np
 import open3d as o3d
+from pts import PtsUtil
+from scipy.spatial import cKDTree

 class ReconstructionUtil:
     @staticmethod
     def reconstruct_with_pts(pts):
         pcd = o3d.geometry.PointCloud()
         pcd.points = o3d.utility.Vector3dVector(pts)
-        pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.001, max_nn=30))
+        pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
         mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9)
         densities = np.asarray(densities)
-        vertices_to_remove = densities < np.quantile(densities, 0.03)
+        vertices_to_remove = densities < np.quantile(densities, 0.2)
         mesh.remove_vertices_by_mask(vertices_to_remove)
         return mesh

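reconstruct_with_pts now estimates normals with a much larger hybrid search radius (0.1 instead of 0.001) and trims the Poisson mesh more aggressively (dropping vertices below the 20th density percentile instead of the 3rd). A minimal, self-contained Open3D sketch of the same Poisson pipeline, with a synthetic sphere cloud standing in for the real input (the point count and output path are arbitrary):

import numpy as np
import open3d as o3d

# Synthetic stand-in input: points on a unit sphere.
pts = np.random.default_rng(0).normal(size=(5000, 3))
pts /= np.linalg.norm(pts, axis=1, keepdims=True)

pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pts)
# Hybrid search: neighbors within `radius`, capped at `max_nn` per point.
pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))

mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=9)
# Poisson extrapolates the surface into weakly supported space; removing the
# lowest-density vertices (bottom 20% here) trims those regions away.
densities = np.asarray(densities)
mesh.remove_vertices_by_mask(densities < np.quantile(densities, 0.2))
o3d.io.write_triangle_mesh("poisson_sphere.obj", mesh)

The hunk continues below with the new filter_points helper and a reworked __main__ benchmark.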
+    @staticmethod
+    def filter_points(points, points_normals, cam_pose, voxel_size=0.005, theta=45):
+        sampled_points = PtsUtil.voxel_downsample_point_cloud(points, voxel_size)
+        kdtree = cKDTree(points_normals[:,:3])
+        _, indices = kdtree.query(sampled_points)
+        nearest_points = points_normals[indices]
+
+        normals = nearest_points[:, 3:]
+        camera_axis = -cam_pose[:3, 2]
+        normals_normalized = normals / np.linalg.norm(normals, axis=1, keepdims=True)
+        cos_theta = np.dot(normals_normalized, camera_axis)
+        theta_rad = np.deg2rad(theta)
+        filtered_sampled_points = sampled_points[cos_theta > np.cos(theta_rad)]
+
+        return filtered_sampled_points[:, :3]

 if __name__ == "__main__":
-    path = r"C:\Document\Local Project\nbv_rec_visualize\mis\sampled_model_points.txt"
-    test_pts = np.loadtxt(path)
-    mesh = ReconstructionUtil.reconstruct_with_pts(test_pts)
-    o3d.io.write_triangle_mesh("output_mesh.obj", mesh)
+    import os
+    root = "/media/hofee/data/project/python/nbv_reconstruction/nbv_rec_visualize/data/sample/"
+    name = "google_scan-box_0106"
+    model_path = os.path.join(root, name, "sampled_model_points.txt")
+    rec_path = os.path.join(root, name, "best_reconstructed_pts.txt")
+    model_pts = np.loadtxt(model_path)
+    rec_pts = np.loadtxt(rec_path)
+    import time
+    start = time.time()
+    model_mesh = ReconstructionUtil.reconstruct_with_pts(model_pts)
+    end = time.time()
+    print(f"Time taken to reconstruct model: {end-start}")
+    rec_mesh = ReconstructionUtil.reconstruct_with_pts(rec_pts)
+    output_dir = "/media/hofee/data/project/python/nbv_reconstruction/nbv_rec_visualize/mis/output_test"
+    model_output_path = os.path.join(output_dir, f"{name}_model_mesh.obj")
+    rec_output_path = os.path.join(output_dir, f"{name}_rec_mesh.obj")
+    o3d.io.write_triangle_mesh(model_output_path, model_mesh)
+    o3d.io.write_triangle_mesh(rec_output_path, rec_mesh)
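The added filter_points keeps only those downsampled points whose surface normal faces the camera within theta degrees: the nearest precomputed normals are fetched with a cKDTree, and the cosine between each normal and the camera axis (the negated third column of the camera-to-world pose) is thresholded against cos(theta). A small sketch of just that angular test, with hand-picked normals and an identity camera pose (all values illustrative, not from the commit):

import numpy as np

theta = 45                          # degrees, matching filter_points' default
cam_pose = np.eye(4)                # stand-in camera-to-world pose
camera_axis = -cam_pose[:3, 2]      # the axis used above: here [0, 0, -1]

normals = np.array([
    [0.0, 0.000, -1.0],   # aligned with camera_axis (0 deg)   -> kept
    [0.0, 0.866, -0.5],   # roughly 60 deg away                -> dropped
    [0.0, 0.000,  1.0],   # opposite direction (180 deg)       -> dropped
])
normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)

cos_theta = normals @ camera_axis
keep = cos_theta > np.cos(np.deg2rad(theta))
print(keep)   # [ True False False]

Whether camera_axis corresponds to the optical axis or its opposite depends on the dataset's camera convention; the sketch only mirrors the arithmetic of the committed code.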