From c0c4f9b61078e73188e4a1ea509da47b06c47c26 Mon Sep 17 00:00:00 2001 From: hofee Date: Wed, 4 Jun 2025 13:46:52 +0800 Subject: [PATCH] update --- app_inference.py | 9 +- .../local/global_only_inference_config.yaml | 8 +- .../local/global_pts_and_local_pts_pose.yaml | 6 +- configs/local/heuristic_evaluation.yaml | 74 +++ .../local/local_only_inference_config.yaml | 10 +- configs/local/mlp_inference_config.yaml | 6 +- .../real_global_only_inference_config.yaml | 6 +- .../uncertainty_guide_evaluation_config.yaml | 12 +- .../seq_dataset_preprocessed.cpython-39.pyc | Bin 2610 -> 2601 bytes core/seq_dataset_preprocessed.py | 2 +- .../evaluate_heuristic.cpython-39.pyc | Bin 0 -> 12522 bytes runners/evaluate_heuristic.py | 428 ++++++++++++++++++ runners/ug_inference_server.py | 93 ++++ 13 files changed, 629 insertions(+), 25 deletions(-) create mode 100644 configs/local/heuristic_evaluation.yaml create mode 100644 runners/__pycache__/evaluate_heuristic.cpython-39.pyc create mode 100644 runners/evaluate_heuristic.py create mode 100644 runners/ug_inference_server.py diff --git a/app_inference.py b/app_inference.py index 76f51e0..e156e51 100644 --- a/app_inference.py +++ b/app_inference.py @@ -5,6 +5,7 @@ from runners.local_points_inferencer import LocalPointsInferencer from runners.inference_server import InferencerServer from runners.evaluate_uncertainty_guide import EvaluateUncertaintyGuide from runners.evaluate_pbnbv import EvaluatePBNBV +from runners.evaluate_heuristic import Heuristic @PytorchBootApplication("global_points_inference") class GlobalPointsInferenceApp: @@ -101,4 +102,10 @@ class EvaluateUncertaintyGuideApp: class EvaluatePBNBVApp: @staticmethod def start(): - EvaluatePBNBV("./configs/local/pbnbv_evalutaion_config.yaml").run() \ No newline at end of file + EvaluatePBNBV("./configs/local/pbnbv_evalutaion_config.yaml").run() + +@PytorchBootApplication("evaluate_heuristic") +class EvaluateHeuristicApp: + @staticmethod + def start(): + Heuristic("./configs/local/heuristic_evaluation.yaml").run() diff --git a/configs/local/global_only_inference_config.yaml b/configs/local/global_only_inference_config.yaml index 265edc6..43f3a42 100644 --- a/configs/local/global_only_inference_config.yaml +++ b/configs/local/global_only_inference_config.yaml @@ -15,16 +15,16 @@ runner: - OmniObject3d_test blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py" - output_dir: "/media/hofee/data/project/exp/new_no_cluster_ab_global_pts_and_local_pose" + output_dir: "/media/hofee/data/project/exp/old_no_cluster_ab_global_pts_and_local_pose" pipeline: nbv_reconstruction_pipeline_global voxel_size: 0.003 min_new_area: 1.0 - overlap_limit: True + overlap_limit: True enable_cluster: False dataset: OmniObject3d_test: - root_dir: "/media/hofee/repository/final_test_set/preprocessed_dataset" - model_dir: "/media/hofee/data/data/target/target_formulated_view" + root_dir: "/media/hofee/data/data/new_testset_output" + model_dir: "/media/hofee/data/data/scaled_object_meshes" source: seq_reconstruction_dataset_preprocessed type: test filter_degree: 75 diff --git a/configs/local/global_pts_and_local_pts_pose.yaml b/configs/local/global_pts_and_local_pts_pose.yaml index abaee4b..ea3a583 100644 --- a/configs/local/global_pts_and_local_pts_pose.yaml +++ b/configs/local/global_pts_and_local_pts_pose.yaml @@ -15,7 +15,7 @@ runner: - OmniObject3d_test blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py" - output_dir: 
"/media/hofee/data/project/exp/new_no_cluster_ab_global_pts_and_local_pts_pose" + output_dir: "/media/hofee/data/project/exp/old_no_cluster_ab_global_pts_and_local_pts_pose" pipeline: nbv_reconstruction_pipeline_local voxel_size: 0.003 min_new_area: 1.0 @@ -23,8 +23,8 @@ runner: enable_cluster: False dataset: OmniObject3d_test: - root_dir: "/media/hofee/repository/final_test_set/preprocessed_dataset" - model_dir: "/media/hofee/data/data/target/target_formulated_view" + root_dir: "/media/hofee/data/data/new_testset_output" + model_dir: "/media/hofee/data/data/scaled_object_meshes" source: seq_reconstruction_dataset_preprocessed # split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt" type: test diff --git a/configs/local/heuristic_evaluation.yaml b/configs/local/heuristic_evaluation.yaml new file mode 100644 index 0000000..e16230a --- /dev/null +++ b/configs/local/heuristic_evaluation.yaml @@ -0,0 +1,74 @@ + +runner: + general: + seed: 0 + device: cuda + cuda_visible_devices: "0,1,2,3,4,5,6,7" + + experiment: + name: exp_hemisphere_circle_trajectory + root_dir: "experiments" + epoch: -1 # -1 stands for last epoch + + test: + dataset_list: + - OmniObject3d_test + + blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py" + output_dir: "/media/hofee/data/project/exp/old_no_limit_hemisphere_random" + pipeline: nbv_reconstruction_pipeline_local + voxel_size: 0.003 + min_new_area: 1.0 + overlap_limit: False + enable_cluster: False + heuristic_method: hemisphere_random + +dataset: + # OmniObject3d_train: + # root_dir: "C:\\Document\\Datasets\\inference_test1" + # model_dir: "C:\\Document\\Datasets\\scaled_object_meshes" + # source: seq_reconstruction_dataset_preprocessed + # split_file: "C:\\Document\\Datasets\\data_list\\sample.txt" + # type: test + # filter_degree: 75 + # ratio: 1 + # batch_size: 1 + # num_workers: 12 + # pts_num: 8192 + # load_from_preprocess: True + + OmniObject3d_test: + root_dir: "/media/hofee/data/data/new_testset_output" + model_dir: "/media/hofee/data/data/scaled_object_meshes" + source: seq_reconstruction_dataset_preprocessed + # split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt" + type: test + filter_degree: 75 + eval_list: + - pose_diff + - coverage_rate_increase + ratio: 0.1 + batch_size: 1 + num_workers: 12 + pts_num: 8192 + load_from_preprocess: True + +heuristic_methods: + hemisphere_random: + center: [0, 0, 0] + radius_fixed: True + fixed_radius: 0.6 + min_radius: 0.4 + max_radius: 0.8 + + hemisphere_circle_trajectory: + center: [0, 0, 0] + radius_fixed: False + fixed_radius: 0.6 + min_radius: 0.4 + max_radius: 0.8 + phi_list: [15, 45, 75] + circle_times: 12 + + + diff --git a/configs/local/local_only_inference_config.yaml b/configs/local/local_only_inference_config.yaml index 3cee732..6723eb8 100644 --- a/configs/local/local_only_inference_config.yaml +++ b/configs/local/local_only_inference_config.yaml @@ -15,12 +15,12 @@ runner: - OmniObject3d_test blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py" - output_dir: "/media/hofee/data/project/exp/new_no_cluster_ab_local_only" + output_dir: "/media/hofee/data/project/exp/old_no_limit_ab_local_only" pipeline: nbv_reconstruction_pipeline_local_only voxel_size: 0.003 min_new_area: 1.0 - overlap_limit: True - enable_cluster: False + overlap_limit: False + enable_cluster: True dataset: # OmniObject3d_train: # root_dir: "C:\\Document\\Datasets\\inference_test1" @@ -36,8 +36,8 @@ dataset: # 
load_from_preprocess: True OmniObject3d_test: - root_dir: "/media/hofee/repository/final_test_set/preprocessed_dataset" - model_dir: "/media/hofee/data/data/target/target_formulated_view" + root_dir: "/media/hofee/data/data/new_testset_output" + model_dir: "/media/hofee/data/data/scaled_object_meshes" source: seq_reconstruction_dataset_preprocessed # split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt" type: test diff --git a/configs/local/mlp_inference_config.yaml b/configs/local/mlp_inference_config.yaml index 0dceec5..d44fc3f 100644 --- a/configs/local/mlp_inference_config.yaml +++ b/configs/local/mlp_inference_config.yaml @@ -15,7 +15,7 @@ runner: - OmniObject3d_test blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py" - output_dir: "/media/hofee/data/project/exp/new_no_cluster_ab_mlp_inference" + output_dir: "/media/hofee/data/project/exp/old_no_cluster_ab_mlp_inference" pipeline: nbv_reconstruction_pipeline_mlp voxel_size: 0.003 min_new_area: 1.0 @@ -23,8 +23,8 @@ runner: enable_cluster: False dataset: OmniObject3d_test: - root_dir: "/media/hofee/repository/final_test_set/preprocessed_dataset" - model_dir: "/media/hofee/data/data/target/target_formulated_view" + root_dir: "/media/hofee/data/data/new_testset_output" + model_dir: "/media/hofee/data/data/scaled_object_meshes" source: seq_reconstruction_dataset_preprocessed # split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt" type: test diff --git a/configs/local/real_global_only_inference_config.yaml b/configs/local/real_global_only_inference_config.yaml index 9519f6b..7e2996a 100644 --- a/configs/local/real_global_only_inference_config.yaml +++ b/configs/local/real_global_only_inference_config.yaml @@ -15,7 +15,7 @@ runner: - OmniObject3d_test blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py" - output_dir: "/media/hofee/data/project/exp/new_no_cluster_ab_global_only" + output_dir: "/media/hofee/data/project/exp/old_no_cluster_ab_global_only" pipeline: nbv_reconstruction_pipeline_global_only voxel_size: 0.003 min_new_area: 1.0 @@ -24,8 +24,8 @@ runner: dataset: OmniObject3d_test: - root_dir: "/media/hofee/repository/final_test_set/preprocessed_dataset" - model_dir: "/media/hofee/data/data/target/target_formulated_view" + root_dir: "/media/hofee/data/data/new_testset_output" + model_dir: "/media/hofee/data/data/scaled_object_meshes" source: seq_reconstruction_dataset_preprocessed type: test filter_degree: 75 diff --git a/configs/local/uncertainty_guide_evaluation_config.yaml b/configs/local/uncertainty_guide_evaluation_config.yaml index e0dba81..0393296 100644 --- a/configs/local/uncertainty_guide_evaluation_config.yaml +++ b/configs/local/uncertainty_guide_evaluation_config.yaml @@ -2,6 +2,7 @@ runner: general: seed: 0 + device: cuda cuda_visible_devices: "0,1,2,3,4,5,6,7" @@ -15,12 +16,13 @@ runner: - OmniObject3d_test blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py" - output_dir: "/media/hofee/data/project/exp/new_no_limit_uncertainty_guide_evaluation" - output_data_root: "/media/hofee/repository/code/nbv_rec_uncertainty_guide/output/reconstruction" + output_dir: "/media/hofee/data/project/exp/old_uncertainty_guide_evaluation" + output_data_root: "/media/hofee/repository/code/nbv_rec_uncertainty_guide/last_result/reconstruction" pipeline: nbv_reconstruction_pipeline_global_only voxel_size: 0.003 min_new_area: 1.0 - overlap_limit: False + overlap_limit: 
True + dataset: # OmniObject3d_train: # root_dir: "C:\\Document\\Datasets\\inference_test1" @@ -36,8 +38,8 @@ dataset: # load_from_preprocess: True OmniObject3d_test: - root_dir: "/media/hofee/repository/final_test_set/preprocessed_dataset" - model_dir: "/media/hofee/data/data/target/target_formulated_view" + root_dir: "/media/hofee/data/data/new_testset_output" + model_dir: "/media/hofee/data/data/scaled_object_meshes" source: seq_reconstruction_dataset_preprocessed # split_file: "C:\\Document\\Datasets\\data_list\\OmniObject3d_test.txt" type: test diff --git a/core/__pycache__/seq_dataset_preprocessed.cpython-39.pyc b/core/__pycache__/seq_dataset_preprocessed.cpython-39.pyc index 6f31bcf366decb19fcc28168a724818070243f0a..e3ef773edae828968a8adf8aec3a1c1103ce7d51 100644 GIT binary patch delta 63 zcmdlavQmUMk(ZZ?0SF#>8D?zX$a{oISxG-PH6=4qKO;XaHB~<)u_O^fx;_4gh2T6(j%v delta 72 zcmZ1}vPpzDk(ZZ?0SMxr%4Zzd$a{oIM_WHPH6=4qKO;XaHC4YTwIIJZvn0Q$Qa>#- aFEJ;+B(=CCzBsi+zbrGgeDfD3Q+5DJK^Z{+ diff --git a/core/seq_dataset_preprocessed.py b/core/seq_dataset_preprocessed.py index 8b0ef99..25512b5 100644 --- a/core/seq_dataset_preprocessed.py +++ b/core/seq_dataset_preprocessed.py @@ -21,7 +21,7 @@ class SeqReconstructionDatasetPreprocessed(BaseDataset): super(SeqReconstructionDatasetPreprocessed, self).__init__(config) self.config = config self.root_dir = config["root_dir"] - self.real_root_dir = r"/media/hofee/repository/final_test_set/view" + self.real_root_dir = r"/media/hofee/data/data/new_testset" self.item_list = os.listdir(self.root_dir) def __getitem__(self, index): diff --git a/runners/__pycache__/evaluate_heuristic.cpython-39.pyc b/runners/__pycache__/evaluate_heuristic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c553ae1ddbc42908d288e5be5028b6c094372d2 GIT binary patch literal 12522 zcma)CS#TW3d7f)$@5KUG+#tEUK&(gtBqhr>ql#kjkQ7NUBhiw*Hodjn9smpM#hzJ` zSob=%L7TFxXfyF0$ zK&p}jrl;@e{`y=nOhQ4+sUXADDL0uxBRCA~DDZIm# zp=vsx4%&vRBl(daKU&S?Gb$gcj8(_;<0>DmOjIZHlPVvpOjW1z(<&dY%v5Ldvnrpc z?5gh0?^gL_WluGm&+3|!>Fi~xE&WN24YBk!jisIGO>H4T5k9*_N*=CbQycz zsWFFtz$;f!ikz#vPLL0u^V}di^1M^3*IbV`OJ2EN3u-Oz602fR@>IRHTwZyuSSzkL z92MzP^=hMDb86n%V#%xX4b;V-D7wz+qKDK&(SE*JtEtZBv-K5Jq+GA)HQhqBH%o$g zKU*&{`kaieI!#`7y>h8UOCS8%LBahdGLA-vmp86!%R1AUaV?TJnaQkcTHazIX5$@V zb1cD<*Yv#24k_^H`7oPjBWx6P5tdTSdr*hLum2p$uU`>L*K zp0=rVbfItQy5<=j)3eq>Yc|uj451am12u-Ki44@3swO&6W2u_hKut*1#HohaLR*Wg za$=xISk)w1q>V4G#RR3%8I46pwTy-}q=eQPVllP8^xAM|1nm*#&ok;#O$IfiTl!6% zN4zo0v&5$Hh1s_0jdv!5xus*4$(KN17XCItX|?om&HeQ;O;>*(LRz!S^sUMMx&Vi% zwQ04s8O%Aul2g=Uc7@ViD&6hv+0@z=T4zsd7tXdrA|$NKw2q6y>SRTT)?7<@ppec! 
zDsAeY(!Kqi0}93mQDd|TCc8Rw!lu4A^-E7-w}-Z{BBtROhnD2%#fqYwf?ML{hF545 zy;T{jH@!yFE3h(`vCH+VPNm?MTaHXr%e6wyxl$-{rzq1sTrN}{Z?(?y!#itSnXF%S zc%|4VRLa$|_twWUKX~$=TGt-GOGqP=wTqVv&gEjISp;=!^PTkvRI3V8-EG?aRUd9JD&xbLJk5Dp7Nronw*txL+7zvmKAEVZxE?63# zTc|awa;O4&FF04RoN^WXLmG8gS_-t*TD@G8Y1(OlJ8rWQfZTSjVw>nyZ#Xq+DIi!h z4Hnn$E3N11%#qglGcTN%aRNlJ5IL?2U69ds72IS})q`94#k4A3;bj29uQAak%_3tm z_970#Im3CKOMAJ_t3{6!ElazEvy7aALU7JHk<6SZy23wse^ZR*Sv<}2Da3!vrV%yLPDcGDbv+ z=Yhl7%%0NPVK258?<6p1QkWwA3bd4O7+YpLqUuv3vZeFyc|)Bvi%=`t0OyyqOCRj5 zDN5gAv2U0n#Nrd0u++-5&M`mH+oawSkSe0OxIe+yo`u# zVrG14j3q>@|D_M%RNJwS$1Z*FC9R+L>yBy1FxMp|*&yyA@Vn6IzG*EAE0Rk*MaL5k zXuTCb$Ln;AgeG?xnsSD;nB$gYs8*_1Tp4pqPR&u|ch6dAtXEox`{?8Iplq=M5@#D( zp9^JyKv^~FWVU5LQS?fyAHJtwxJ$U)3M&qAB%2$TQ3u@4;ouGY1e!RVzf2Ib0iCX3 zYFC;NFERoi?32Fs)FQ0mw18c-bKyDZg zR;f5u$g6wBiqy-1CRh(YM}5Y2e#hr?wl6TH?(Co<{xn8Cg-4OTW@hwBJz^yES=|5= ziJ+7K6S0ilD2*c*!82#HCbrR8pJq9oz}+cI$SSx?P`^vMR*4vO_xCPjhVUp-oCWg{ zKq-(&M6n)2M7l_+P+#elAP);k&0=DN4c!3Iwnwq5VU}Ju_#H)(A(TfkCR2MzwG6KtP?5l1 zyNVlH(%u*&#cr5(uGFOQwFxmKK-6oKVpPNhNR^G=2!F(EkG03!6Jl&dZ%>K|q*E(e zdm7IS9;j#ST_T0r-55C~CYdIZBF-{5EHJn|?Qt>PnHGD9wRkg~SurM{NUrTt@XE4S zdoP}S-tNwx6^)I33(UX0A2kO=229etkZm6n2iW+#HPq}C_Il{1?*2mT6Jg5nUqXSi zF8vkGFv}*y0kEn`Z?DK~>c~xry;K%9(rK|z<%!89XeBp|*7F|i_uvEfJBR%q0=wEL zro|9$raMR#l z6Axkh*RaZiY&U&f>{UD3D|P`g2Rn0Hzz<@bP)1)9b3SZX3}g}VX;7v0#p{UmYKC9ViES! zEIW9^MBOpe?Oxj>jv+nD=5846{bDh|kmFdn&JKy=B0GuIvUzb_?IGJ+xoQEV7uJCb zd`=wpcgAv)+PaRKOczpOW)OY~u0$ORg#WbS+KAYv&Sf9k7MTx?<845#y{9+p5zHFG ztmjmpqXf~e*sL`m_>SGQHhvDlcLIX%MFyuq<#_vH@o?uvpv*kX7DN;~SQIBP(xUIa z)43{vc;e$H2rB~dh*Ax6w(%zMsPJXC*e@oL7l zz264yHe*Brur7%%X}7YVa#2UgM3|$ z=1}vRE+qFbE4z(KaiicBVVPlcLuR&D`CZA@s-o*Eq+S7{l+~;G*@3E3nU`ohFOt3t z=S6G!1GV*9{Yov{Q-ZU;*>*CUGv$zP=6S1-+N<>nlgZ_Bg^UE|K#)5y)zp@2{+{9K zi1xbXOtq(h^*(4NdoIg_U`o?cYgTgli8qn`{kRrKKI8b zvny!C?;jsOkF|_voQt?W?70OY{zYQ<~yY%p|hlPtK~UwPxs zcRzFIyMK85tDUz#_2V~x@&`@QB<_6YGjF}}mA5{B9m`0fd?fqkk3RS2k3MnxPq+FD zd-W5yUi9n zZ+-Lb^)K{hz4e!0y!F`|C$oGUJ(^@Z4tBfqv+K9s_(JxvtZKRQ)1TgY`Hi<;`N8eZ z_1=nCe*OBd{`R?x=N@lPV6fliwpwbh3qD4M@2yvVa{KE)%r;38$liMG+FLh2bL;v~ z?tba(*i+&nS&p&uT*srlQt|le2lH-0A3+#R({ zMoOiV5&Mfe_)C_OwI&Q2mA+*Lmh(hI*@ayhDX|$ag?QXx$C}F zUnzU5IK4&##**9>!15h-6<&t>#e4V!-%ntG>s{j)XfILl)RJ?NJ`nZ&AI{YhFHn;S ztmj9m=SPv`-sRiXWRn5J95xC$Qe1H5E|=v(wGoX4N8fLg5uzb-)W|616xz!;Wdn3p zq{FN9y&5G~D7i{Aj?1;y>;i4M2J1Sl;hhb?C}e!7v!G7rhzE{A6Ryd3-4GUJujEx%%OD;tpW`o#Cl!g zioO9;pf~?4?fYd)K8GY9S1WlhSQmIx0^TGtN5OhZ@G1JGz)crYAoY)SlQv|Q;H$%- zl1w2Mlp-0^XLfRe$`4xc3^`i5D6PDs1Lrv1M=&S^+h+`S$spWaVpy&IQ6~Kioy~)I zaeizkaw|EQAHNT&RXt|&H_b`X^6$}ZZQu6aX|aA7Z6!P|JW`sbEYh?Y!P|yQjBMAK zzE7Vt6R=z(dd7^Q*3hlr*j55L>uuXgn-==mze(8SE<>IAvyE}oj-btg>x_C}v~C+G z3~KlLr%{ezW~wph@7J*~eV6*C(3VCnMJq94aK~lw?U+8T`lj&xm=S~ZoYB*8+hx(0 zJXR?^V?^{AeFN^jj7b)D1eP|6zr01(x0H9YF!!bF2eS|E+f#93ToyU<$}+R3{poR{?1^PK3blo+_#a8 z0vTQMFKIZvNu0a#M;_{*&Q5YD4D$LQ?%+LMBB9&;k%mfTbyVa674TRzby!h2t`&Vk zYlmu<(Ayz(gv!}S9ur~1N;cqdv~W~#HImKzg7&(;1P?61m%l~{$xZ$>O6U{(t4MMY zUm*M#c^M{7K)fy5W6!)r4Vf;JA?>Tj0sBl~;27lIZG67RCkDFgq;6WrOdpmJII@ZQ z5_)S-AC?bP2THV~?vFG~0-pjMab4hZ;L}E|&w%%q$`(MQyK@SsmH8L+f{zzwp$=ox z0C4S2SeWqj!uP$28W`gdm}^mHBFeTK6cIx^B72|&wA~kH6UV@(=#S!T%0mjO_&uyh z8dok#{Yoy;*U)yj6-mObgQmGF?MAt@UO{AqL8s#nVK|?s9(Lpl@fh+|LT#LAB1~{GCb`B5pY@RKV64(7v zzIPDEQ0N~-H`J=Y7h%S_$>)Xc&I6c{8ko+OPIwC@(n_46$U*^;g@(6!_ig;7MUpZX z-2po})DLPY&nMrFPs-?N7-Xe7GZvoQo;mmar$JGan*ao0&nEDIM0v8QE zDke=X3E%(Ut?9ZrgwB6X00alt))0wUgQF7I3ctLn1J_1LMYG|)1Ss8#mtZCwnA%#3 zS>!rWZpOjhmNz62!vI!K1MAaRg<>i9Vf1T*7#+kLv;v|KEUaS0BMQqSORWit4mm8_ z2bdoxYDHn#7gZp&b$qEl*xqY^pQ7Sq%eAbx>cH#YjZ`AKBrS#7qo~T0 
zKAIclN6^augAy{ReHH2;<>n|kM9IHWLg>N&jgntcLe6|Sd={Txs(WW4l$Z)8_|dA7 z{xaNdd{t4RB0@qRndk1!VBmY_qqJ8aU5OZ_MNrs@dVb8d zKg%P8cC+YCXg3bT%YZl&P!m#blLC{_=RvZqsXii7uyz190b=1F>!nHmvPj4H&ofB&|lVF1O(A>?8_SaQhB29^}TWq&1n>thVmyxLsn9yOCL-O|zpe$3TpSr*XSuf}E%k+zScH zply?dCX@r(qp*QZ(a8(@;Fg8D9Ubo=K1$NjceDz~&@ByX+#Rfu?ztE$PH76O>r}louf7RJ9zE^f znIMl_^qv7%M@YsLhxD&2yo7HCQR0h;^+WYT;LXj2)Ka1F`Mv%Vy`;V_^~*bnBZ zaiMq3gCjbH_M^QYA`353?Gu#j zARC(LS=tt1zNV+bWZoQsU><=H(aJn%bKA6k3R&8CXj&H+-bkgS(3^=j0tdLw1I)!c zgg2x_+eSc%^n56s6ncZ+j~h@zMa>cCjzX9dPQwOh$AGahk^pddC zPL$A=6hmmEelg|;>PQ>ynJCCqgJle(lL zYA}iYKTA*H-V24G$!NEY6pU|M$vLQ`FsBcoC1JFV_VNDz;gj4ZPA2iWo__i%tN?Ku zA_yS4A_x<2A_&-7iXcLGQw-Y1TV3k_fvC7O%0PY)LR8r5O35Q22{3;`|90Gi9Tr?Q znFg9C7{;twxZoF_fgnKK+Tfo+Of#j3gOoZ;U5`r%LhCS)U(Z0afM^iUMt>l;D($RK z2mSqC!vX{f(j7w7dUyp&AV_Qk#`(x3Y2ndM%+D)Q8Wp4M4D|-#MHS(Vp%h1iI0NDu z7l>M_5#y>i#W3R{BgWB2{StIRu|0u4Nij}iKzMPM0O3urBujztRH_JVq*s?xtZj}adC z2#n)4u(qUVF*oF!$`h!8-`rU_0fOa<)Qm=%6X)c+sLd?l>YRzoEu5@Yj=RE8Dk&lQ z;(I7ZZtC2mPhIasSo{$6oTns92@#kyvHpCRHbEaJnE)U5D(QyGy{TW7`i9h7d>@V2 zOUZsp4p2hY8=s@(3@t<@O7Z$Kil0QiOOmHgX`nY4+9}W!xvvr{f=L%2IBG?o5)>Uns~} zf!xJS=*tC}C=@O=ixs~`U6 zG8tU(E5XB$8$W&EDM73tND(Vy8!j0Sa5&CK?8r>)_qF4(NR0kNv7wZe3MGak2RQZ3 z?f0!zG81H?iEGyGf*;`s!X18jSHW!j9xQQAY$d3;8S@y4<`xh~{ zwFF9-QiJ65;F#dr4_u_Ig9ZJxkFB4aw7ZX;H_Hz7we_4<^TWy literal 0 HcmV?d00001 diff --git a/runners/evaluate_heuristic.py b/runners/evaluate_heuristic.py new file mode 100644 index 0000000..b34042d --- /dev/null +++ b/runners/evaluate_heuristic.py @@ -0,0 +1,428 @@ +import os +import json +from utils.render import RenderUtil +from utils.pose import PoseUtil +from utils.pts import PtsUtil +from utils.reconstruction import ReconstructionUtil + +import torch +from tqdm import tqdm +import numpy as np +import pickle + +from PytorchBoot.config import ConfigManager +import PytorchBoot.namespace as namespace +import PytorchBoot.stereotype as stereotype +from PytorchBoot.factory import ComponentFactory + +from PytorchBoot.dataset import BaseDataset +from PytorchBoot.runners.runner import Runner +from PytorchBoot.utils import Log +from PytorchBoot.status import status_manager +from utils.data_load import DataLoadUtil + +@stereotype.runner("heuristic") +class Heuristic(Runner): + def __init__(self, config_path): + + super().__init__(config_path) + + self.script_path = ConfigManager.get(namespace.Stereotype.RUNNER, "blender_script_path") + self.output_dir = ConfigManager.get(namespace.Stereotype.RUNNER, "output_dir") + self.voxel_size = ConfigManager.get(namespace.Stereotype.RUNNER, "voxel_size") + self.min_new_area = ConfigManager.get(namespace.Stereotype.RUNNER, "min_new_area") + self.heuristic_method = ConfigManager.get(namespace.Stereotype.RUNNER, "heuristic_method") + self.heuristic_method_config = ConfigManager.get("heuristic_methods", self.heuristic_method) + self.overlap_limit = ConfigManager.get(namespace.Stereotype.RUNNER, "overlap_limit") + CM = 0.01 + self.min_new_pts_num = self.min_new_area * (CM / self.voxel_size) **2 + + + ''' Experiment ''' + self.load_experiment("nbv_evaluator") + self.stat_result_path = os.path.join(self.output_dir, "stat.json") + if os.path.exists(self.stat_result_path): + with open(self.stat_result_path, "r") as f: + self.stat_result = json.load(f) + else: + self.stat_result = {} + + ''' Test ''' + self.test_config = ConfigManager.get(namespace.Stereotype.RUNNER, namespace.Mode.TEST) + self.test_dataset_name_list = self.test_config["dataset_list"] + self.test_set_list = [] + self.test_writer_list = [] + seen_name = set() + for 
test_dataset_name in self.test_dataset_name_list:
+            if test_dataset_name not in seen_name:
+                seen_name.add(test_dataset_name)
+            else:
+                raise ValueError("Duplicate test dataset name: {}".format(test_dataset_name))
+            test_set: BaseDataset = ComponentFactory.create(namespace.Stereotype.DATASET, test_dataset_name)
+            self.test_set_list.append(test_set)
+        self.print_info()
+
+
+    def run(self):
+        Log.info("Loading from epoch {}.".format(self.current_epoch))
+        self.run_heuristic()
+        Log.success("Inference finished.")
+
+
+    def run_heuristic(self):
+
+        test_set: BaseDataset
+        for dataset_idx, test_set in enumerate(self.test_set_list):
+            status_manager.set_progress("heuristic", "heuristic", f"dataset", dataset_idx, len(self.test_set_list))
+            test_set_name = test_set.get_name()
+
+            total=int(len(test_set))
+            for i in tqdm(range(total), desc=f"Processing {test_set_name}", ncols=100):
+                try:
+                    data = test_set.__getitem__(i)
+                    scene_name = data["scene_name"]
+                    inference_result_path = os.path.join(self.output_dir, test_set_name, f"{scene_name}.pkl")
+                    if os.path.exists(inference_result_path):
+                        Log.info(f"Inference result already exists for scene: {scene_name}")
+                        continue
+
+                    status_manager.set_progress("heuristic", "heuristic", f"Batch[{test_set_name}]", i+1, total)
+                    output = self.predict_sequence(data)
+                    self.save_inference_result(test_set_name, data["scene_name"], output)
+                except Exception as e:
+                    print(e)
+                    Log.error(f"Error, {e}")
+                    continue
+
+        status_manager.set_progress("heuristic", "heuristic", f"dataset", len(self.test_set_list), len(self.test_set_list))
+
+    def predict_sequence(self, data, cr_increase_threshold=0, overlap_area_threshold=25, scan_points_threshold=10, max_iter=50, max_retry=10, max_success=3):
+        scene_name = data["scene_name"]
+        Log.info(f"Processing scene: {scene_name}")
+        status_manager.set_status("heuristic", "heuristic", "scene", scene_name)
+
+        ''' data for rendering '''
+        scene_path = data["scene_path"]
+        O_to_L_pose = data["O_to_L_pose"]
+        voxel_threshold = self.voxel_size
+        filter_degree = 75
+        down_sampled_model_pts = data["gt_pts"]
+
+        first_frame_to_world_9d = data["first_scanned_n_to_world_pose_9d"][0]
+        first_frame_to_world = np.eye(4)
+        first_frame_to_world[:3,:3] = PoseUtil.rotation_6d_to_matrix_numpy(first_frame_to_world_9d[:6])
+        first_frame_to_world[:3,3] = first_frame_to_world_9d[6:]
+
+        # Get the scan points on the display table
+        root = os.path.dirname(scene_path)
+        display_table_info = DataLoadUtil.get_display_table_info(root, scene_name)
+        radius = display_table_info["radius"]
+        scan_points = np.asarray(ReconstructionUtil.generate_scan_points(display_table_top=0,display_table_radius=radius))
+
+        # Generate the candidate pose sequence
+        if self.heuristic_method == "hemisphere_random":
+            pose_sequence = self.generate_hemisphere_random_sequence(
+                max_iter,
+                self.heuristic_method_config
+            )
+        elif self.heuristic_method == "hemisphere_circle_trajectory":
+            pose_sequence = self.generate_hemisphere_circle_sequence(
+                self.heuristic_method_config
+            )
+        else:
+            raise ValueError(f"Unknown heuristic method: {self.heuristic_method}")
+
+        # Render the first frame
+        first_frame_target_pts, _, first_frame_scan_points_indices = RenderUtil.render_pts(
+            first_frame_to_world, scene_path, self.script_path, scan_points,
+            voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose
+        )
+
+        # Initialize result storage
+        scanned_view_pts = [first_frame_target_pts]
+        history_indices = [first_frame_scan_points_indices]
+        pred_cr_seq = []
+        retry_duplication_pose = []
+        retry_no_pts_pose = []
+        retry_overlap_pose = []
+        pose_9d_seq = [first_frame_to_world_9d]
+
+        last_pred_cr, _ = self.compute_coverage_rate(scanned_view_pts, None, down_sampled_model_pts, threshold=voxel_threshold)
+        pred_cr_seq.append(last_pred_cr)
+        last_pts_num = PtsUtil.voxel_downsample_point_cloud(first_frame_target_pts, voxel_threshold).shape[0]
+
+        # Execute the pose sequence
+        retry = 0
+        success = 0
+        #import ipdb; ipdb.set_trace()
+        combined_scanned_pts_tensor = torch.tensor([0,0,0])
+        cnt = 0
+        for pred_pose in pose_sequence:
+            cnt += 1
+            if retry >= max_retry or success >= max_success:
+                break
+
+            Log.green(f"Iteration: {cnt}/{len(pose_sequence)}, retries: {retry}/{max_retry}, successes: {success}/{max_success}")
+
+            try:
+                new_target_pts, _, new_scan_points_indices = RenderUtil.render_pts(
+                    pred_pose, scene_path, self.script_path, scan_points,
+                    voxel_threshold=voxel_threshold, filter_degree=filter_degree, nO_to_nL_pose=O_to_L_pose
+                )
+
+                # Check scan-point overlap with previous views
+                if not ReconstructionUtil.check_scan_points_overlap(history_indices, new_scan_points_indices, scan_points_threshold):
+                    curr_overlap_area_threshold = overlap_area_threshold
+                else:
+                    curr_overlap_area_threshold = overlap_area_threshold * 0.5
+
+                # Check point-cloud overlap
+                downsampled_new_target_pts = PtsUtil.voxel_downsample_point_cloud(new_target_pts, voxel_threshold)
+                overlap, _ = ReconstructionUtil.check_overlap(
+                    downsampled_new_target_pts, down_sampled_model_pts,
+                    overlap_area_threshold=curr_overlap_area_threshold,
+                    voxel_size=voxel_threshold,
+                    require_new_added_pts_num=True
+                )
+
+                if self.overlap_limit:
+                    if not overlap:
+                        Log.yellow("no overlap!")
+                        retry += 1
+                        retry_overlap_pose.append(pred_pose.tolist())
+                        continue
+
+                if new_target_pts.shape[0] == 0:
+                    Log.red("New view contains no points")
+                    retry_no_pts_pose.append(pred_pose.tolist())
+                    retry += 1
+                    continue
+
+                history_indices.append(new_scan_points_indices)
+
+                # Compute coverage rate
+                pred_cr, _ = self.compute_coverage_rate(scanned_view_pts, new_target_pts, down_sampled_model_pts, threshold=voxel_threshold)
+                Log.yellow(f"Coverage rate: {pred_cr}, previous: {last_pred_cr}, max: {data['seq_max_coverage_rate']}")
+
+                # Update results
+                pred_cr_seq.append(pred_cr)
+                scanned_view_pts.append(new_target_pts)
+                pose_6d = PoseUtil.matrix_to_rotation_6d_numpy(pred_pose[:3,:3])
+                pose_9d = np.concatenate([
+                    pose_6d,
+                    pred_pose[:3,3]
+                ])
+                pose_9d_seq.append(pose_9d)
+                # Downsample the accumulated point cloud for combined_scanned_pts
+                combined_scanned_pts = np.vstack(scanned_view_pts)
+                voxel_downsampled_pts, _ = self.voxel_downsample_with_mapping(combined_scanned_pts, voxel_threshold)
+                random_downsampled_pts, _ = PtsUtil.random_downsample_point_cloud(voxel_downsampled_pts, 8192, require_idx=True)
+                combined_scanned_pts_tensor = torch.tensor(random_downsampled_pts, dtype=torch.float32)
+
+
+                # Check how many new points were added
+                pts_num = voxel_downsampled_pts.shape[0]
+                Log.info(f"New points: {pts_num - last_pts_num}, current: {pts_num}, previous: {last_pts_num}")
+
+                if pts_num - last_pts_num < self.min_new_pts_num:
+                    if pred_cr <= data["seq_max_coverage_rate"] - 1e-2:
+                        retry += 1
+                        retry_duplication_pose.append(pred_pose.tolist())
+                        Log.red(f"Point increment too small: < {self.min_new_pts_num}")
+                    else:
+                        success += 1
+                        Log.success(f"Reached the target coverage rate")
+
+                last_pts_num = pts_num
+                last_pred_cr = pred_cr
+
+                if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
+                    Log.success(f"Reached the maximum coverage rate: {pred_cr}")
+
+
+            except Exception as e:
+                import traceback
+                traceback.print_exc()
+                Log.error(f"Error while processing scene {scene_path}: {e}")
+                retry_no_pts_pose.append(pred_pose.tolist())
+                retry += 1
+                continue
+
+        # Assemble results
+        result = {
+            "pred_pose_9d_seq": pose_9d_seq,
+            "combined_scanned_pts_tensor": combined_scanned_pts_tensor,
+            "target_pts_seq": scanned_view_pts,
+            
"coverage_rate_seq": pred_cr_seq, + "max_coverage_rate": data["seq_max_coverage_rate"], + "pred_max_coverage_rate": max(pred_cr_seq), + "scene_name": scene_name, + "retry_no_pts_pose": retry_no_pts_pose, + "retry_duplication_pose": retry_duplication_pose, + "retry_overlap_pose": retry_overlap_pose, + "best_seq_len": data["best_seq_len"], + } + + self.stat_result[scene_name] = { + "coverage_rate_seq": pred_cr_seq, + "pred_max_coverage_rate": max(pred_cr_seq), + "pred_seq_len": len(pred_cr_seq), + } + print('success rate: ', max(pred_cr_seq)) + + return result + + def voxel_downsample_with_mapping(self, point_cloud, voxel_size=0.003): + voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32) + unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True) + idx_sort = np.argsort(inverse) + idx_unique = idx_sort[np.cumsum(counts)-counts] + downsampled_points = point_cloud[idx_unique] + return downsampled_points, inverse + + def compute_coverage_rate(self, scanned_view_pts, new_pts, model_pts, threshold=0.005): + if new_pts is not None: + new_scanned_view_pts = scanned_view_pts + [new_pts] + else: + new_scanned_view_pts = scanned_view_pts + combined_point_cloud = np.vstack(new_scanned_view_pts) + down_sampled_combined_point_cloud = PtsUtil.voxel_downsample_point_cloud(combined_point_cloud,threshold) + return ReconstructionUtil.compute_coverage_rate(model_pts, down_sampled_combined_point_cloud, threshold) + + + def save_inference_result(self, dataset_name, scene_name, output): + dataset_dir = os.path.join(self.output_dir, dataset_name) + if not os.path.exists(dataset_dir): + os.makedirs(dataset_dir) + output_path = os.path.join(dataset_dir, f"{scene_name}.pkl") + pickle.dump(output, open(output_path, "wb")) + with open(self.stat_result_path, "w") as f: + json.dump(self.stat_result, f) + + + def get_checkpoint_path(self, is_last=False): + return os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME, + "Epoch_{}.pth".format( + self.current_epoch if self.current_epoch != -1 and not is_last else "last")) + + def load_checkpoint(self, is_last=False): + self.load(self.get_checkpoint_path(is_last)) + Log.success(f"Loaded checkpoint from {self.get_checkpoint_path(is_last)}") + if is_last: + checkpoint_root = os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME) + meta_path = os.path.join(checkpoint_root, "meta.json") + if not os.path.exists(meta_path): + raise FileNotFoundError( + "No checkpoint meta.json file in the experiment {}".format(self.experiments_config["name"])) + file_path = os.path.join(checkpoint_root, "meta.json") + with open(file_path, "r") as f: + meta = json.load(f) + self.current_epoch = meta["last_epoch"] + self.current_iter = meta["last_iter"] + + def load_experiment(self, backup_name=None): + super().load_experiment(backup_name) + self.current_epoch = self.experiments_config["epoch"] + + def create_experiment(self, backup_name=None): + super().create_experiment(backup_name) + + + + def print_info(self): + def print_dataset(dataset: BaseDataset): + config = dataset.get_config() + name = dataset.get_name() + Log.blue(f"Dataset: {name}") + for k,v in config.items(): + Log.blue(f"\t{k}: {v}") + + super().print_info() + table_size = 70 + Log.blue(f"{'+' + '-' * (table_size // 2)} Datasets {'-' * (table_size // 2)}" + '+') + for i, test_set in enumerate(self.test_set_list): + Log.blue(f"test dataset {i}: ") + print_dataset(test_set) + + Log.blue(f"{'+' + '-' * (table_size // 
2)}----------{'-' * (table_size // 2)}" + '+')
+
+    def generate_hemisphere_random_sequence(self, max_iter, config):
+        """Generate a random hemisphere sampling sequence"""
+        radius_fixed = config["radius_fixed"]
+        fixed_radius = config["fixed_radius"]
+        min_radius = config["min_radius"]
+        max_radius = config["max_radius"]
+        poses = []
+        center = np.array(config["center"])
+
+        for _ in range(max_iter):
+            # Randomly sample a viewing direction
+            direction = np.random.randn(3)
+            direction[2] = abs(direction[2])  # keep it in the upper hemisphere
+            direction = direction / np.linalg.norm(direction)
+
+            # Determine the radius
+            if radius_fixed:
+                radius = fixed_radius
+            else:
+                radius = np.random.uniform(min_radius, max_radius)
+
+            # Compute position and orientation
+            position = center + direction * radius
+            z_axis = -direction
+            y_axis = np.array([0, 0, 1])
+            x_axis = np.cross(y_axis, z_axis)
+            x_axis = x_axis / np.linalg.norm(x_axis)
+            y_axis = np.cross(z_axis, x_axis)
+
+            pose = np.eye(4)
+            pose[:3,:3] = np.stack([x_axis, y_axis, z_axis], axis=1)
+            pose[:3,3] = position
+            poses.append(pose)
+
+        return poses
+
+    def generate_hemisphere_circle_sequence(self, config):
+        """Generate a circular trajectory sampling sequence"""
+        radius_fixed = config["radius_fixed"]
+        fixed_radius = config["fixed_radius"]
+        min_radius = config["min_radius"]
+        max_radius = config["max_radius"]
+        phi_list = config["phi_list"]
+        circle_times = config["circle_times"]
+
+        poses = []
+        center = np.array(config["center"])
+
+        for phi in phi_list:  # elevation angle
+            phi_rad = np.deg2rad(phi)
+            for i in range(circle_times):  # azimuth angle
+                theta = i * (2 * np.pi / circle_times)
+
+                # Determine the radius
+                if radius_fixed:
+                    radius = fixed_radius
+                else:
+                    radius = np.random.uniform(min_radius, max_radius)
+
+                # Spherical to Cartesian coordinates
+                x = radius * np.cos(theta) * np.sin(phi_rad)
+                y = radius * np.sin(theta) * np.sin(phi_rad)
+                z = radius * np.cos(phi_rad)
+                position = center + np.array([x, y, z])
+
+                # Compute the orientation (look at the center)
+                direction = (center - position) / np.linalg.norm(center - position)
+                z_axis = direction
+                y_axis = np.array([0, 0, 1])
+                x_axis = np.cross(y_axis, z_axis)
+                x_axis = x_axis / np.linalg.norm(x_axis)
+                y_axis = np.cross(z_axis, x_axis)
+
+                pose = np.eye(4)
+                pose[:3,:3] = np.stack([x_axis, y_axis, z_axis], axis=1)
+                pose[:3,3] = position
+                poses.append(pose)
+
+        return poses
+
diff --git a/runners/ug_inference_server.py b/runners/ug_inference_server.py
new file mode 100644
index 0000000..5f8e96c
--- /dev/null
+++ b/runners/ug_inference_server.py
@@ -0,0 +1,93 @@
+import os
+import json
+import torch
+import numpy as np
+from flask import Flask, request, jsonify
+
+import PytorchBoot.namespace as namespace
+import PytorchBoot.stereotype as stereotype
+from PytorchBoot.factory import ComponentFactory
+
+from PytorchBoot.runners.runner import Runner
+from PytorchBoot.utils import Log
+
+from utils.pts import PtsUtil
+from beans.predict_result import PredictResult
+
+@stereotype.runner("ug_inference_server")
+class UGInferencerServer(Runner):
+    def __init__(self, config_path):
+        super().__init__(config_path)
+
+        ''' Web Server '''
+        self.app = Flask(__name__)
+        ''' Pipeline '''
+        self.pipeline_name = self.config[namespace.Stereotype.PIPELINE]
+        self.pipeline:torch.nn.Module = ComponentFactory.create(namespace.Stereotype.PIPELINE, self.pipeline_name)
+        self.pipeline = self.pipeline.to(self.device)
+        self.pts_num = 8192
+        self.voxel_size = 0.002
+
+        ''' Experiment '''
+        self.load_experiment("ug_inference_server")
+
+    def get_result(self, output_data):
+
+        pred_pose_9d = output_data["pred_pose_9d"]
+        pred_pose_9d = np.asarray(PredictResult(pred_pose_9d.cpu().numpy(), 
None, cluster_params=dict(eps=0.25, min_samples=3)).candidate_9d_poses, dtype=np.float32) + result = { + "pred_pose_9d": pred_pose_9d.tolist() + } + return result + + + def run(self): + Log.info("Loading from epoch {}.".format(self.current_epoch)) + + @self.app.route("/inference", methods=["POST"]) + def inference(): + data = request.json + input_data = self.get_input_data(data) + collated_input_data = self.collate_input(input_data) + output_data = self.pipeline.forward_test(collated_input_data) + result = self.get_result(output_data) + return jsonify(result) + + + self.app.run(host="0.0.0.0", port=5000) + + def get_checkpoint_path(self, is_last=False): + return os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME, + "Epoch_{}.pth".format( + self.current_epoch if self.current_epoch != -1 and not is_last else "last")) + + def load_checkpoint(self, is_last=False): + self.load(self.get_checkpoint_path(is_last)) + Log.success(f"Loaded checkpoint from {self.get_checkpoint_path(is_last)}") + if is_last: + checkpoint_root = os.path.join(self.experiment_path, namespace.Direcotry.CHECKPOINT_DIR_NAME) + meta_path = os.path.join(checkpoint_root, "meta.json") + if not os.path.exists(meta_path): + raise FileNotFoundError( + "No checkpoint meta.json file in the experiment {}".format(self.experiments_config["name"])) + file_path = os.path.join(checkpoint_root, "meta.json") + with open(file_path, "r") as f: + meta = json.load(f) + self.current_epoch = meta["last_epoch"] + self.current_iter = meta["last_iter"] + + def load_experiment(self, backup_name=None): + super().load_experiment(backup_name) + self.current_epoch = self.experiments_config["epoch"] + self.load_checkpoint(is_last=(self.current_epoch == -1)) + + def create_experiment(self, backup_name=None): + super().create_experiment(backup_name) + + + def load(self, path): + state_dict = torch.load(path) + self.pipeline.load_state_dict(state_dict) + + +
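
Note (reviewer addition, not part of the patch): the new runners/ug_inference_server.py registers a POST /inference route served on 0.0.0.0:5000 and returns the clustered candidate poses as {"pred_pose_9d": [...]}. Below is a minimal client sketch. The request payload keys are assumptions, since get_input_data()/collate_input() are not included in this diff; adapt the field names to whatever that code actually expects.

# Hypothetical client for the /inference endpoint (sketch; payload keys are assumed).
import numpy as np
import requests

payload = {
    # Assumed field names -- replace with what UGInferencerServer.get_input_data() expects.
    "scanned_pts": np.random.rand(8192, 3).tolist(),              # accumulated scanned point cloud
    "scanned_n_to_world_pose_9d": np.random.rand(1, 9).tolist(),  # 6D rotation + translation per captured view
}

resp = requests.post("http://localhost:5000/inference", json=payload, timeout=60)
resp.raise_for_status()
# The server replies with the clustered candidate next-best-view poses as 9D vectors.
pred_pose_9d = np.asarray(resp.json()["pred_pose_9d"], dtype=np.float32)
print("candidate 9D poses:", pred_pose_9d.shape)

The heuristic baseline added in runners/evaluate_heuristic.py is launched through the "evaluate_heuristic" PytorchBoot application registered in app_inference.py, which reads configs/local/heuristic_evaluation.yaml.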