recover backup

hofee 2024-09-20 15:54:01 +08:00
parent 85f606a2a3
commit b5703cdc0e
2 changed files with 47 additions and 20 deletions

.gitignore (vendored) · 1 addition, 0 deletions

@@ -1,4 +1,5 @@
 # ---> Python
+/static
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]

app.py · 46 additions, 20 deletions

@@ -1,25 +1,39 @@
-from flask import Flask, request, jsonify
+from flask import Flask, request, jsonify, send_from_directory
 import os
 import json
 import base64
-import pickle
 import numpy as np
+import pickle
+from flask_cors import CORS
 from data_load import DataLoadUtil
+from reconstruction import ReconstructionUtil
 from pts import PtsUtil
 
-app = Flask(__name__)
-ROOT = os.path.join("./static")
-print(ROOT)
+app = Flask(__name__, static_folder="static")
+CORS(app)
+ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data')
+print(ROOT)
+
+@app.route("/")
+def serve_index():
+    return send_from_directory(app.static_folder, "index.html")
+
+@app.route('/<path:filename>')
+def serve_static(filename):
+    return send_from_directory(app.static_folder, filename)
 
 @app.route('/get_scene_list', methods=['POST'])
 def get_scene_list():
     data = request.json
     dataset_name = data.get('dataset_name')
     dataset_path = os.path.join(ROOT, dataset_name)
     if not os.path.exists(dataset_path):
         print(f"Dataset not found: {dataset_path}")
         return jsonify({"error": "Dataset not found"}), 404
     scene_list = [d for d in os.listdir(dataset_path) if os.path.isdir(os.path.join(dataset_path, d))]
     return jsonify({"scene_list": scene_list, "success": True})
@@ -28,15 +42,20 @@ def get_scene_info():
     data = request.json
     dataset_name = data.get('dataset_name')
     scene_name = data.get('scene_name')
     scene_path = os.path.join(ROOT, dataset_name, scene_name)
+    camera_params_path = os.path.join(scene_path, 'camera_params')
     label_json_path = os.path.join(scene_path, 'label.json')
     if not os.path.exists(scene_path) or not os.path.exists(label_json_path):
         return jsonify({"error": "Scene or label.json not found"}), 404
     with open(label_json_path, 'r') as f:
         label_data = json.load(f)
+    sequence_length = len([f for f in os.listdir(camera_params_path) if os.path.isfile(os.path.join(camera_params_path, f))])
     max_coverage_rate = label_data.get('max_coverage_rate')
     best_sequence = label_data.get('best_sequence')
     best_sequence_length = len(best_sequence)
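Note: the new sequence_length counts the files under the scene's camera_params directory; the route's return statement falls outside this hunk, so the exact response shape is not visible here. A hedged client sketch (any field names beyond those in the hunk are assumptions):

import requests

resp = requests.post(
    "http://localhost:5000/get_scene_info",
    json={"dataset_name": "sample_dataset", "scene_name": "scene_0"},
)
info = resp.json()  # presumably carries max_coverage_rate and best_sequence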
@@ -64,21 +83,25 @@ def read_image_as_base64(file_path):
     except FileNotFoundError:
         return None
 
 @app.route('/get_frame_data', methods=['POST'])
 def get_frame_data():
     data = request.json
     dataset_name = data.get('dataset_name')
     scene_name = data.get('scene_name')
     sequence = data.get('sequence')
     scene_path = os.path.join(ROOT, dataset_name, scene_name)
     root = os.path.join(ROOT, dataset_name)
     camera_params_path = os.path.join(scene_path, 'camera_params')
     depth_path = os.path.join(scene_path, 'depth')
     mask_path = os.path.join(scene_path, 'mask')
-    points_and_normals_path = os.path.join(scene_path, 'points_and_normals.txt')
-    points_and_normals = np.loadtxt(points_and_normals_path)
-    model_points = points_and_normals[:, :3]
+    model_points_normals = DataLoadUtil.load_points_normals(root, scene_name)
+    model_points = model_points_normals[:, :3]
+    obj_path = os.path.join(dataset_name, scene_name, 'mesh', 'world_target_mesh.obj')
+    mtl_path = os.path.join(dataset_name, scene_name, 'mesh', 'material.mtl')
     if not all([os.path.exists(scene_path), os.path.exists(camera_params_path), os.path.exists(depth_path), os.path.exists(mask_path)]):
         return jsonify({"error": "Invalid paths or files not found"}), 404
@@ -89,26 +112,29 @@ def get_frame_data():
     for frame_info in sequence:
         frame_id = frame_info.get('frame')
         frame_data = {}
-        camera_params_file = os.path.join(camera_params_path, f'{frame_id}.json')
-        if os.path.exists(camera_params_file):
-            with open(camera_params_file, 'r') as f:
-                camera_params = json.load(f)
-            frame_data['camera_params'] = camera_params
-        else:
-            frame_data['camera_params'] = None
+        path = DataLoadUtil.get_path(root, scene_name, frame_id)
+        cam_params = DataLoadUtil.load_cam_info(path, binocular=True)
+        frame_data['cam_to_world'] = cam_params['cam_to_world'].tolist()
         depth_file = os.path.join(depth_path, f'{frame_id}_L.png')
         depth_base64 = read_image_as_base64(depth_file)
         frame_data['depth'] = depth_base64 if depth_base64 else None
         mask_file = os.path.join(mask_path, f'{frame_id}_L.png')
         mask_base64 = read_image_as_base64(mask_file)
         frame_data['mask'] = mask_base64 if mask_base64 else None
-        path = DataLoadUtil.get_path(root, scene_name, frame_id)
-        point_cloud = DataLoadUtil.get_point_cloud_world_from_path(path)
-        sampled_point_cloud = PtsUtil.voxel_downsample_point_cloud(point_cloud, 0.01)
+        point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(path, binocular=True)
+        sampled_point_cloud = ReconstructionUtil.filter_points(point_cloud, model_points_normals, cam_params['cam_to_world'], theta=75)
+        sampled_point_cloud = PtsUtil.voxel_downsample_point_cloud(sampled_point_cloud, 0.01)
         frame_data['new_point_cloud'] = sampled_point_cloud.tolist()
         frame_data['combined_point_cloud'] = combined_point_cloud.tolist()
         combined_point_cloud = np.concatenate([combined_point_cloud, sampled_point_cloud], axis=0)
         combined_point_cloud = PtsUtil.voxel_downsample_point_cloud(combined_point_cloud, 0.01)
         frame_data["coverage_rate"] = frame_info.get('coverage_rate')
@@ -121,7 +147,7 @@ def get_frame_data():
             "data": frame_data
         })
-    return jsonify({"seq_frame_data": result,"model_pts":model_points, "success": True})
+    return jsonify({"seq_frame_data": result,"model_pts":model_points.tolist(), "obj_path": obj_path, "mtl_path":mtl_path, "success": True})
 
 @app.route('/analysis_inference_result', methods=['POST'])
 def analysis_inference_result():
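Note: taken together, the recovered /get_frame_data route accepts a frame sequence and returns, per frame, the cam_to_world pose, base64 depth and mask images, the newly observed (filtered, downsampled) point cloud, and the running combined cloud. An end-to-end client sketch (host, port, and payload values are assumptions; the sequence item shape is inferred from the frame_info.get calls in the loop):

import requests

payload = {
    "dataset_name": "sample_dataset",
    "scene_name": "scene_0",
    "sequence": [{"frame": 0, "coverage_rate": 0.0}],
}
resp = requests.post("http://localhost:5000/get_frame_data", json=payload)
result = resp.json()
# Per the new return statement: seq_frame_data, model_pts, obj_path, mtl_path, success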