From cca1ce78760b8c7da62782c7e46f597e93ee2a34 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 25 Sep 2024 15:55:16 +0100 Subject: [PATCH 01/83] refactor: rename nerfstudio cloud final output --- scripts/reconstruction_benchmark/nerf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 505750e..54081e6 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -71,7 +71,8 @@ def run_nerfstudio(ns_config): cloud.transform(scale_matrix) cloud.transform(np.linalg.inv(ns_se3)) - o3d.io.write_point_cloud(str(output_cloud_file.with_name("input_scale.ply")), cloud) + final_metric_cloud_file = f"{ns_config["method"]}_cloud_metric.ply" + o3d.io.write_point_cloud(str(output_cloud_file.with_name(final_metric_cloud_file)), cloud) def run_nerfstudio_exporter(config_file, export_method): From 92a96bd1390c0130e7b112efeb055b87e210118f Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 25 Sep 2024 16:04:38 +0100 Subject: [PATCH 02/83] refactor: save ns cloud to recon benchmark folder --- scripts/reconstruction_benchmark/main.py | 6 +++++- scripts/reconstruction_benchmark/nerf.py | 5 +++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 6322be3..295a827 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -67,6 +67,9 @@ def __init__(self, project_folder, sensor): self.ns_model_dir = self.ns_data_dir / "trained_models" logger.info(f"Project folder: {self.project_folder}") + self.recon_benchmark_dir = self.output_folder / "recon_benchmark" + self.recon_benchmark_dir.mkdir(exist_ok=True, parents=True) + def process_gt_cloud(self): print_with_colour("Creating Octree and merged cloud from ground truth clouds") processPCDFolder(str(self.gt_individual_folder), self.octomap_resolution, str(self.gt_octree_path)) @@ -146,7 +149,8 @@ def compute_sim3(self): def run_nerfstudio(self, method="nerfacto", json_filename="transforms_metric.json"): assert self.ns_data_dir.exists(), f"nerfstudio directory not found at {self.ns_data_dir}" ns_config = generate_nerfstudio_config(method, self.ns_data_dir / json_filename, self.ns_model_dir) - run_nerfstudio(ns_config) + final_cloud_file = run_nerfstudio(ns_config) + final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) if __name__ == "__main__": diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 54081e6..120134a 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -71,8 +71,9 @@ def run_nerfstudio(ns_config): cloud.transform(scale_matrix) cloud.transform(np.linalg.inv(ns_se3)) - final_metric_cloud_file = f"{ns_config["method"]}_cloud_metric.ply" - o3d.io.write_point_cloud(str(output_cloud_file.with_name(final_metric_cloud_file)), cloud) + final_metric_cloud_file = output_cloud_file.with_name(f'{ns_config["method"]}_cloud_metric.ply') + o3d.io.write_point_cloud(str(final_metric_cloud_file), cloud) + return final_metric_cloud_file def run_nerfstudio_exporter(config_file, export_method): From b6bb2ceccedf959df8c6404136e94a616d2fe447 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 26 Sep 2024 01:08:33 +0100 Subject: [PATCH 03/83] refactor: save mvs cloud to recon benchmark folder --- scripts/reconstruction_benchmark/main.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 295a827..332d224 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -137,7 +137,7 @@ def compute_sim3(self): T_lidar_colmap = align(lidar_slam_traj_cam_frame, colmap_traj_single_cam, self.colmap_output_folder) rescale_colmap_json(colmap_traj_file, T_lidar_colmap, rescaled_colmap_traj_file) mvs_cloud_file = self.mvs_output_folder / "scene_dense_nerf_world.ply" - scaled_mvs_cloud_file = self.mvs_output_folder / "scene_dense_nerf_world_scaled.ply" + self.scaled_mvs_cloud_file = self.recon_benchmark_dir / "OpenMVS_dense_cloud_metric.ply" rescale_openmvs_cloud(mvs_cloud_file, T_lidar_colmap, scaled_mvs_cloud_file) rescaled_colmap_traj = NeRFTrajReader(rescaled_colmap_traj_file).read_file() pose_to_ply(rescaled_colmap_traj, self.colmap_output_folder / "rescaled_colmap_traj.ply", [0.0, 1.0, 0.0]) From c797b2e8553dc35ee22fe9184bdfa077acbf5edf Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 26 Sep 2024 00:52:14 +0100 Subject: [PATCH 04/83] refactor: reorganise lidar cloud evaluation in recon benchmakr folder --- .../lidar_cloud_eval.py | 34 ------------- scripts/reconstruction_benchmark/main.py | 51 ++++++++++++++----- 2 files changed, 38 insertions(+), 47 deletions(-) diff --git a/scripts/reconstruction_benchmark/lidar_cloud_eval.py b/scripts/reconstruction_benchmark/lidar_cloud_eval.py index dc023da..e69de29 100644 --- a/scripts/reconstruction_benchmark/lidar_cloud_eval.py +++ b/scripts/reconstruction_benchmark/lidar_cloud_eval.py @@ -1,34 +0,0 @@ -from pathlib import Path - -import numpy as np -import open3d as o3d - -from oxford_spires_utils.eval import get_recon_metrics, save_error_cloud -from oxford_spires_utils.point_cloud import merge_downsample_vilens_slam_clouds -from spires_cpp import convertOctreeToPointCloud, processPCDFolder, removeUnknownPoints - - -def evaluate_lidar_cloud( - project_folder, - lidar_cloud_folder_path, - gt_octree_path, - gt_cloud_path, - octomap_resolution=0.1, - downsample_voxel_size=0.05, -): - input_cloud_bt_path = Path(project_folder) / "input_cloud.bt" - processPCDFolder(str(lidar_cloud_folder_path), octomap_resolution, str(input_cloud_bt_path)) - - input_cloud_free_path = str(Path(input_cloud_bt_path).with_name(f"{Path(input_cloud_bt_path).stem}_free.pcd")) - input_cloud_occ_path = str(Path(input_cloud_bt_path).with_name(f"{Path(input_cloud_bt_path).stem}_occ.pcd")) - convertOctreeToPointCloud(str(input_cloud_bt_path), str(input_cloud_free_path), str(input_cloud_occ_path)) - - input_cloud_merged_path = Path(project_folder) / "input_cloud_merged.pcd" - _ = merge_downsample_vilens_slam_clouds(lidar_cloud_folder_path, downsample_voxel_size, input_cloud_merged_path) - input_cloud_filtered_path = Path(project_folder) / "input_cloud_merged_filtered.pcd" - removeUnknownPoints(str(input_cloud_merged_path), str(gt_octree_path), str(input_cloud_filtered_path)) - input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(input_cloud_filtered_path)).points) - gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(gt_cloud_path)).points) - - print(get_recon_metrics(input_cloud_np, gt_cloud_np)) - save_error_cloud(input_cloud_np, gt_cloud_np, str(Path(project_folder) / "input_error.pcd")) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 332d224..84f5342 100644 --- a/scripts/reconstruction_benchmark/main.py +++ 
b/scripts/reconstruction_benchmark/main.py @@ -4,20 +4,21 @@ from pathlib import Path import numpy as np +import open3d as o3d import yaml -from lidar_cloud_eval import evaluate_lidar_cloud from mvs import rescale_openmvs_cloud, run_openmvs from nerf import create_nerfstudio_dir, generate_nerfstudio_config, run_nerfstudio from sfm import rescale_colmap_json, run_colmap from oxford_spires_utils.bash_command import print_with_colour +from oxford_spires_utils.eval import get_recon_metrics, save_error_cloud from oxford_spires_utils.point_cloud import merge_downsample_vilens_slam_clouds from oxford_spires_utils.sensor import Sensor from oxford_spires_utils.trajectory.align import align from oxford_spires_utils.trajectory.file_interfaces import NeRFTrajReader, VilensSlamTrajReader from oxford_spires_utils.trajectory.utils import pose_to_ply -from oxford_spires_utils.utils import convert_e57_folder_to_pcd_folder, transform_pcd_folder -from spires_cpp import convertOctreeToPointCloud, processPCDFolder +from oxford_spires_utils.utils import transform_pcd_folder +from spires_cpp import convertOctreeToPointCloud, processPCDFolder, removeUnknownPoints logger = logging.getLogger(__name__) @@ -50,11 +51,13 @@ def __init__(self, project_folder, sensor): self.lidar_output_folder.mkdir(exist_ok=True, parents=True) self.colmap_output_folder = self.output_folder / "colmap" self.colmap_output_folder.mkdir(exist_ok=True, parents=True) + self.recon_benchmark_dir = self.output_folder / "recon_benchmark" + self.recon_benchmark_dir.mkdir(exist_ok=True, parents=True) # TODO: check lidar cloud folder has viewpoints and is pcd, check gt folder is pcd, check image folder is jpg/png self.octomap_resolution = 0.1 self.cloud_downsample_voxel_size = 0.05 - self.gt_octree_path = self.output_folder / "gt_cloud.bt" - self.gt_cloud_merged_path = self.output_folder / "gt_cloud_merged.pcd" + self.gt_octree_path = self.recon_benchmark_dir / "gt_cloud.bt" + self.gt_cloud_merged_path = self.recon_benchmark_dir / "gt_cloud_merged.pcd" self.colmap_sparse_folder = self.colmap_output_folder / "sparse" / "0" self.openmvs_bin = "/usr/local/bin/OpenMVS" @@ -67,8 +70,8 @@ def __init__(self, project_folder, sensor): self.ns_model_dir = self.ns_data_dir / "trained_models" logger.info(f"Project folder: {self.project_folder}") - self.recon_benchmark_dir = self.output_folder / "recon_benchmark" - self.recon_benchmark_dir.mkdir(exist_ok=True, parents=True) + self.lidar_cloud_merged_path = self.recon_benchmark_dir / "lidar_cloud_merged.pcd" + def process_gt_cloud(self): print_with_colour("Creating Octree and merged cloud from ground truth clouds") @@ -89,15 +92,28 @@ def evaluate_lidar_clouds(self): self.octomap_resolution, ) - def tranform_lidar_clouds(self, transform_matrix_path=None): + def process_lidar_clouds(self, transform_matrix_path=None): + logger.info("Transforming lidar clouds to the same frame as the ground truth clouds") if transform_matrix_path is None: transform_matrix_path = self.project_folder / "T_gt_lidar.txt" assert transform_matrix_path.exists(), f"Transform matrix not found at {transform_matrix_path}" transform_matrix = np.loadtxt(transform_matrix_path) - new_individual_clouds_folder = self.project_folder / "lidar_clouds_transformed" + new_individual_clouds_folder = self.lidar_output_folder / "lidar_clouds_transformed" transform_pcd_folder(self.individual_clouds_folder, new_individual_clouds_folder, transform_matrix) self.individual_clouds_folder = new_individual_clouds_folder - + logger.info("Creating Octree from 
transformed lidar clouds") + lidar_cloud_octomap_file = self.lidar_output_folder / "lidar_cloud.bt" + processPCDFolder(str(self.individual_clouds_folder), self.octomap_resolution, str(lidar_cloud_octomap_file)) + logger.info("Converting Octree to point cloud") + lidar_cloud_free_path = Path(lidar_cloud_octomap_file).with_name( + f"{Path(lidar_cloud_octomap_file).stem}_free.pcd" + ) + lidar_cloud_occ_path = Path(lidar_cloud_octomap_file).with_name( + f"{Path(lidar_cloud_octomap_file).stem}_occ.pcd" + ) + convertOctreeToPointCloud(str(lidar_cloud_octomap_file), str(lidar_cloud_free_path), str(lidar_cloud_occ_path)) + logger.info("Merging and downsampling lidar clouds") + _ = merge_downsample_vilens_slam_clouds(self.individual_clouds_folder, self.cloud_downsample_voxel_size, self.lidar_cloud_merged_path) def run_colmap(self): run_colmap(self.image_folder, self.colmap_output_folder) create_nerfstudio_dir(self.colmap_output_folder, self.ns_data_dir, self.image_folder) @@ -151,7 +167,16 @@ def run_nerfstudio(self, method="nerfacto", json_filename="transforms_metric.jso ns_config = generate_nerfstudio_config(method, self.ns_data_dir / json_filename, self.ns_model_dir) final_cloud_file = run_nerfstudio(ns_config) final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) - + + def evaluate_reconstruction(self, input_cloud_path): + assert input_cloud_path.exists(), f"Input cloud not found at {input_cloud_path}" + assert self.gt_octree_path.exists(), f"Ground truth octree not found at {self.gt_octree_path}" + filtered_input_cloud_path = Path(input_cloud_path).with_name(f"{Path(input_cloud_path).stem}_filtered.pcd") + removeUnknownPoints(str(input_cloud_path), str(self.gt_octree_path), str(filtered_input_cloud_path)) + input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(filtered_input_cloud_path)).points) + gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).points) + print(get_recon_metrics(input_cloud_np, gt_cloud_np)) + save_error_cloud(input_cloud_np, gt_cloud_np, str(Path(self.project_folder) / "input_error.pcd")) if __name__ == "__main__": setup_logging() @@ -165,8 +190,8 @@ def run_nerfstudio(self, method="nerfacto", json_filename="transforms_metric.jso project_folder = "/home/oxford_spires_dataset/data/2024-03-13-observatory-quarter-01" recon_benchmark = ReconstructionBenchmark(project_folder, sensor) recon_benchmark.process_gt_cloud() - recon_benchmark.tranform_lidar_clouds() - recon_benchmark.evaluate_lidar_clouds() + recon_benchmark.process_lidar_clouds() + recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_cloud_merged_path) recon_benchmark.run_colmap() recon_benchmark.run_openmvs() recon_benchmark.compute_sim3() From 09faae801b3a112b2b96783b32acfb0f22c89802 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 26 Sep 2024 10:00:05 +0100 Subject: [PATCH 05/83] refactor: separate load lidar-gt transform --- scripts/reconstruction_benchmark/main.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 84f5342..db27dfd 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -13,6 +13,7 @@ from oxford_spires_utils.bash_command import print_with_colour from oxford_spires_utils.eval import get_recon_metrics, save_error_cloud from oxford_spires_utils.point_cloud import merge_downsample_vilens_slam_clouds +from oxford_spires_utils.se3 import is_se3_matrix from 
oxford_spires_utils.sensor import Sensor from oxford_spires_utils.trajectory.align import align from oxford_spires_utils.trajectory.file_interfaces import NeRFTrajReader, VilensSlamTrajReader @@ -72,7 +73,6 @@ def __init__(self, project_folder, sensor): self.lidar_cloud_merged_path = self.recon_benchmark_dir / "lidar_cloud_merged.pcd" - def process_gt_cloud(self): print_with_colour("Creating Octree and merged cloud from ground truth clouds") processPCDFolder(str(self.gt_individual_folder), self.octomap_resolution, str(self.gt_octree_path)) @@ -92,14 +92,18 @@ def evaluate_lidar_clouds(self): self.octomap_resolution, ) - def process_lidar_clouds(self, transform_matrix_path=None): - logger.info("Transforming lidar clouds to the same frame as the ground truth clouds") + def load_lidar_gt_transform(self, transform_matrix_path=None): if transform_matrix_path is None: transform_matrix_path = self.project_folder / "T_gt_lidar.txt" + logger.info(f"Loading transform matrix from {transform_matrix_path}") assert transform_matrix_path.exists(), f"Transform matrix not found at {transform_matrix_path}" - transform_matrix = np.loadtxt(transform_matrix_path) + self.transform_matrix = np.loadtxt(transform_matrix_path) + assert is_se3_matrix(self.transform_matrix)[0], is_se3_matrix(self.transform_matrix)[1] + + def process_lidar_clouds(self): + logger.info("Transforming lidar clouds to the same frame as the ground truth clouds") new_individual_clouds_folder = self.lidar_output_folder / "lidar_clouds_transformed" - transform_pcd_folder(self.individual_clouds_folder, new_individual_clouds_folder, transform_matrix) + transform_pcd_folder(self.individual_clouds_folder, new_individual_clouds_folder, self.transform_matrix) self.individual_clouds_folder = new_individual_clouds_folder logger.info("Creating Octree from transformed lidar clouds") lidar_cloud_octomap_file = self.lidar_output_folder / "lidar_cloud.bt" @@ -113,7 +117,10 @@ def process_lidar_clouds(self, transform_matrix_path=None): ) convertOctreeToPointCloud(str(lidar_cloud_octomap_file), str(lidar_cloud_free_path), str(lidar_cloud_occ_path)) logger.info("Merging and downsampling lidar clouds") - _ = merge_downsample_vilens_slam_clouds(self.individual_clouds_folder, self.cloud_downsample_voxel_size, self.lidar_cloud_merged_path) + _ = merge_downsample_vilens_slam_clouds( + self.individual_clouds_folder, self.cloud_downsample_voxel_size, self.lidar_cloud_merged_path + ) + def run_colmap(self): run_colmap(self.image_folder, self.colmap_output_folder) create_nerfstudio_dir(self.colmap_output_folder, self.ns_data_dir, self.image_folder) @@ -167,7 +174,7 @@ def run_nerfstudio(self, method="nerfacto", json_filename="transforms_metric.jso ns_config = generate_nerfstudio_config(method, self.ns_data_dir / json_filename, self.ns_model_dir) final_cloud_file = run_nerfstudio(ns_config) final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) - + def evaluate_reconstruction(self, input_cloud_path): assert input_cloud_path.exists(), f"Input cloud not found at {input_cloud_path}" assert self.gt_octree_path.exists(), f"Ground truth octree not found at {self.gt_octree_path}" @@ -178,6 +185,7 @@ def evaluate_reconstruction(self, input_cloud_path): print(get_recon_metrics(input_cloud_np, gt_cloud_np)) save_error_cloud(input_cloud_np, gt_cloud_np, str(Path(self.project_folder) / "input_error.pcd")) + if __name__ == "__main__": setup_logging() logger.info("Starting Reconstruction Benchmark") @@ -189,6 +197,7 @@ def evaluate_reconstruction(self, 
input_cloud_path): convert_e57_folder_to_pcd_folder(gt_cloud_folder_e57_path, gt_cloud_folder_pcd_path) project_folder = "/home/oxford_spires_dataset/data/2024-03-13-observatory-quarter-01" recon_benchmark = ReconstructionBenchmark(project_folder, sensor) + recon_benchmark.load_lidar_gt_transform() recon_benchmark.process_gt_cloud() recon_benchmark.process_lidar_clouds() recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_cloud_merged_path) From 46cfe4f35ff67459a39fc05d59473982063323de Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 26 Sep 2024 10:01:52 +0100 Subject: [PATCH 06/83] feat: transform mvs cloud to gt frame --- scripts/reconstruction_benchmark/main.py | 15 +++++++++++---- scripts/reconstruction_benchmark/mvs.py | 7 +++++++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index db27dfd..fc7c20d 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -6,7 +6,7 @@ import numpy as np import open3d as o3d import yaml -from mvs import rescale_openmvs_cloud, run_openmvs +from mvs import rescale_openmvs_cloud, run_openmvs, transform_cloud_to_gt_frame from nerf import create_nerfstudio_dir, generate_nerfstudio_config, run_nerfstudio from sfm import rescale_colmap_json, run_colmap @@ -160,8 +160,12 @@ def compute_sim3(self): T_lidar_colmap = align(lidar_slam_traj_cam_frame, colmap_traj_single_cam, self.colmap_output_folder) rescale_colmap_json(colmap_traj_file, T_lidar_colmap, rescaled_colmap_traj_file) mvs_cloud_file = self.mvs_output_folder / "scene_dense_nerf_world.ply" - self.scaled_mvs_cloud_file = self.recon_benchmark_dir / "OpenMVS_dense_cloud_metric.ply" - rescale_openmvs_cloud(mvs_cloud_file, T_lidar_colmap, scaled_mvs_cloud_file) + self.scaled_mvs_cloud_file = self.mvs_output_folder / "OpenMVS_dense_cloud_metric.pcd" + rescale_openmvs_cloud(mvs_cloud_file, T_lidar_colmap, self.scaled_mvs_cloud_file) + self.scaled_mvs_cloud_gt_frame_file = self.recon_benchmark_dir / "OpenMVS_dense_cloud_gt_frame.pcd" + transform_cloud_to_gt_frame( + self.scaled_mvs_cloud_file, self.transform_matrix, self.scaled_mvs_cloud_gt_frame_file + ) rescaled_colmap_traj = NeRFTrajReader(rescaled_colmap_traj_file).read_file() pose_to_ply(rescaled_colmap_traj, self.colmap_output_folder / "rescaled_colmap_traj.ply", [0.0, 1.0, 0.0]) pose_to_ply(lidar_slam_traj, self.colmap_output_folder / "lidar_slam_traj.ply", [1.0, 0.0, 0.0]) @@ -177,12 +181,14 @@ def run_nerfstudio(self, method="nerfacto", json_filename="transforms_metric.jso def evaluate_reconstruction(self, input_cloud_path): assert input_cloud_path.exists(), f"Input cloud not found at {input_cloud_path}" + assert Path(input_cloud_path).suffix == ".pcd", "Input cloud must be a pcd file" assert self.gt_octree_path.exists(), f"Ground truth octree not found at {self.gt_octree_path}" filtered_input_cloud_path = Path(input_cloud_path).with_name(f"{Path(input_cloud_path).stem}_filtered.pcd") + logger.info(f'Removing unknown points from "{input_cloud_path}" using {self.gt_octree_path}') removeUnknownPoints(str(input_cloud_path), str(self.gt_octree_path), str(filtered_input_cloud_path)) input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(filtered_input_cloud_path)).points) gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).points) - print(get_recon_metrics(input_cloud_np, gt_cloud_np)) + logger.info(get_recon_metrics(input_cloud_np, gt_cloud_np)) 
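# Aside, not part of the patch above: get_recon_metrics and save_error_cloud come
# from oxford_spires_utils.eval and are never shown in this series. Below is a
# hedged, self-contained sketch of what such cloud-to-cloud benchmark metrics
# typically compute; the function name and the 0.1 m threshold are illustrative
# assumptions, not the repository's actual implementation.
import numpy as np
import open3d as o3d

def recon_metrics_sketch(input_pts: np.ndarray, gt_pts: np.ndarray, thresh: float = 0.1) -> dict:
    input_cloud = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(input_pts))
    gt_cloud = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(gt_pts))
    # nearest-neighbour distance from each reconstructed point to the ground truth (accuracy direction)
    d_recon_to_gt = np.asarray(input_cloud.compute_point_cloud_distance(gt_cloud))
    # nearest-neighbour distance from each ground-truth point to the reconstruction (completeness direction)
    d_gt_to_recon = np.asarray(gt_cloud.compute_point_cloud_distance(input_cloud))
    return {
        "accuracy_m": float(d_recon_to_gt.mean()),
        "completeness_m": float(d_gt_to_recon.mean()),
        "precision": float((d_recon_to_gt < thresh).mean()),
        "recall": float((d_gt_to_recon < thresh).mean()),
    }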
save_error_cloud(input_cloud_np, gt_cloud_np, str(Path(self.project_folder) / "input_error.pcd")) @@ -204,5 +210,6 @@ def evaluate_reconstruction(self, input_cloud_path): recon_benchmark.run_colmap() recon_benchmark.run_openmvs() recon_benchmark.compute_sim3() + recon_benchmark.evaluate_reconstruction(recon_benchmark.scaled_mvs_cloud_gt_frame_file) recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_metric.json") recon_benchmark.run_nerfstudio("splatfacto") diff --git a/scripts/reconstruction_benchmark/mvs.py b/scripts/reconstruction_benchmark/mvs.py index 3be9d09..34dd1fb 100644 --- a/scripts/reconstruction_benchmark/mvs.py +++ b/scripts/reconstruction_benchmark/mvs.py @@ -136,6 +136,13 @@ def rescale_openmvs_cloud(original_cloud_file, sim3_matrix, output_cloud_file): logger.info(f"Rescaled OpenMVS point cloud to to metric and save as {output_cloud_file}") +def transform_cloud_to_gt_frame(cloud_file, se3_matrix, output_cloud_file): + cloud = o3d.io.read_point_cloud(str(cloud_file)) + cloud.transform(se3_matrix) + o3d.io.write_point_cloud(str(output_cloud_file), cloud) + logger.info(f"Transformed point cloud to the ground truth frame and saved as {output_cloud_file}") + + if __name__ == "__main__": image_path = "/home/yifu/data/nerf_data_pipeline/2024-03-13-maths_1/raw" colmap_output_path = "/home/yifu/data/nerf_data_pipeline/2024-03-13-maths_1/processed/output_colmap" From 629b664f02cc11f2f2dd87deeeaf1d7a783c10c3 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 26 Sep 2024 15:04:53 +0100 Subject: [PATCH 07/83] feat: add colmap sequential matcher option --- scripts/reconstruction_benchmark/main.py | 4 ++-- scripts/reconstruction_benchmark/sfm.py | 25 +++++++++++++++++------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index fc7c20d..a71d662 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -121,8 +121,8 @@ def process_lidar_clouds(self): self.individual_clouds_folder, self.cloud_downsample_voxel_size, self.lidar_cloud_merged_path ) - def run_colmap(self): - run_colmap(self.image_folder, self.colmap_output_folder) + def run_colmap(self, matcher="vocab_tree_matcher"): + run_colmap(self.image_folder, self.colmap_output_folder, matcher=matcher) create_nerfstudio_dir(self.colmap_output_folder, self.ns_data_dir, self.image_folder) def run_openmvs(self): diff --git a/scripts/reconstruction_benchmark/sfm.py b/scripts/reconstruction_benchmark/sfm.py index a84355b..17f8876 100644 --- a/scripts/reconstruction_benchmark/sfm.py +++ b/scripts/reconstruction_benchmark/sfm.py @@ -49,7 +49,9 @@ def get_vocab_tree(image_num) -> Path: return vocab_tree_filename -def run_colmap(image_path, output_path, camera_model="OPENCV_FISHEYE"): +def run_colmap( + image_path, output_path, camera_model="OPENCV_FISHEYE", matcher="vocab_tree_matcher", loop_detection_period=10 +): logger.debug(f"Running colmap; img_path {image_path}; output: {output_path}, {camera_model}") assert camera_model in camera_model_list, f"{camera_model} not supported. 
Supported models: {camera_model_list}" database_path = output_path / "database.db" @@ -72,14 +74,23 @@ def run_colmap(image_path, output_path, camera_model="OPENCV_FISHEYE"): logger.debug(f"Total number of images in COLMAP database: {total_image_num}") image_num = len(list(image_path.rglob("*"))) - colmap_vocab_tree_matcher_cmd = [ - "colmap vocab_tree_matcher", + colmap_matcher_cmd = [ + f"colmap {matcher}", f"--database_path {database_path}", - f"--VocabTreeMatching.vocab_tree_path {get_vocab_tree(image_num)}", ] - colmap_vocab_tree_matcher_cmd = " ".join(colmap_vocab_tree_matcher_cmd) - logger.info(f"Running {colmap_vocab_tree_matcher_cmd}") - run_command(colmap_vocab_tree_matcher_cmd, print_command=False) + if matcher == "vocab_tree_matcher": + colmap_matcher_cmd.append(f"--VocabTreeMatching.vocab_tree_path {get_vocab_tree(image_num)}") + elif matcher == "sequential_matcher": + colmap_matcher_cmd.append("--SequentialMatching.loop_detection 1") + colmap_matcher_cmd.append(f"--SequentialMatching.vocab_tree_path {get_vocab_tree(image_num)}") + colmap_matcher_cmd.append(f"--SequentialMatching.loop_detection_period {loop_detection_period}") + else: + raise ValueError( + f"matcher {matcher} not supported. Supported matchers: ['vocab_tree_matcher', 'sequential_matcher']" + ) + colmap_matcher_cmd = " ".join(colmap_matcher_cmd) + logger.info(f"Running {colmap_matcher_cmd}") + run_command(colmap_matcher_cmd, print_command=False) mapper_ba_global_function_tolerance = 1e-5 colmap_mapper_cmd = [ From bb7098e1d79586b868fda6942c89db0e228c431a Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Fri, 27 Sep 2024 00:36:05 +0100 Subject: [PATCH 08/83] refactor: move gt e57 conversion to class function --- scripts/reconstruction_benchmark/main.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index a71d662..813ce28 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -18,7 +18,7 @@ from oxford_spires_utils.trajectory.align import align from oxford_spires_utils.trajectory.file_interfaces import NeRFTrajReader, VilensSlamTrajReader from oxford_spires_utils.trajectory.utils import pose_to_ply -from oxford_spires_utils.utils import transform_pcd_folder +from oxford_spires_utils.utils import convert_e57_folder_to_pcd_folder, transform_pcd_folder from spires_cpp import convertOctreeToPointCloud, processPCDFolder, removeUnknownPoints logger = logging.getLogger(__name__) @@ -45,7 +45,6 @@ def __init__(self, project_folder, sensor): self.sensor = sensor self.camera_for_alignment = "cam_front" self.image_folder = self.project_folder / "images" - self.gt_individual_folder = self.project_folder / "gt_clouds" self.individual_clouds_folder = self.project_folder / "lidar_clouds" self.output_folder = self.project_folder / "outputs" self.lidar_output_folder = self.output_folder / "lidar" @@ -59,6 +58,8 @@ def __init__(self, project_folder, sensor): self.cloud_downsample_voxel_size = 0.05 self.gt_octree_path = self.recon_benchmark_dir / "gt_cloud.bt" self.gt_cloud_merged_path = self.recon_benchmark_dir / "gt_cloud_merged.pcd" + self.gt_cloud_individual_e57_folder = self.project_folder / "gt" / "individual_e57_clouds" + self.gt_cloud_individual_pcd_folder = self.project_folder / "gt" / "individual_pcd_clouds" self.colmap_sparse_folder = self.colmap_output_folder / "sparse" / "0" self.openmvs_bin = "/usr/local/bin/OpenMVS" @@ -74,13 +75,16 @@ def __init__(self, 
project_folder, sensor): self.lidar_cloud_merged_path = self.recon_benchmark_dir / "lidar_cloud_merged.pcd" def process_gt_cloud(self): - print_with_colour("Creating Octree and merged cloud from ground truth clouds") - processPCDFolder(str(self.gt_individual_folder), self.octomap_resolution, str(self.gt_octree_path)) + logger.info("Converting ground truth clouds from e57 to pcd") + convert_e57_folder_to_pcd_folder(self.gt_cloud_individual_e57_folder, self.gt_cloud_individual_pcd_folder) + logger.info("Creating Octree and merged cloud from ground truth clouds") + processPCDFolder(str(self.gt_cloud_individual_pcd_folder), self.octomap_resolution, str(self.gt_octree_path)) gt_cloud_free_path = str(Path(self.gt_octree_path).with_name(f"{Path(self.gt_octree_path).stem}_free.pcd")) gt_cloud_occ_path = str(Path(self.gt_octree_path).with_name(f"{Path(self.gt_octree_path).stem}_occ.pcd")) convertOctreeToPointCloud(str(self.gt_octree_path), str(gt_cloud_free_path), str(gt_cloud_occ_path)) + logger.info("Merging and downsampling ground truth clouds") _ = merge_downsample_vilens_slam_clouds( - self.gt_individual_folder, self.cloud_downsample_voxel_size, self.gt_cloud_merged_path + self.gt_cloud_individual_pcd_folder, self.cloud_downsample_voxel_size, self.gt_cloud_merged_path ) def evaluate_lidar_clouds(self): @@ -198,9 +202,6 @@ def evaluate_reconstruction(self, input_cloud_path): with open(Path(__file__).parent.parent.parent / "config" / "sensor.yaml", "r") as f: sensor_config = yaml.safe_load(f)["sensor"] sensor = Sensor(**sensor_config) - gt_cloud_folder_e57_path = "/home/oxford_spires_dataset/data/2024-03-13-maths_1/gt_individual_e57" - gt_cloud_folder_pcd_path = "/home/oxford_spires_dataset/data/2024-03-13-maths_1/gt_clouds" - convert_e57_folder_to_pcd_folder(gt_cloud_folder_e57_path, gt_cloud_folder_pcd_path) project_folder = "/home/oxford_spires_dataset/data/2024-03-13-observatory-quarter-01" recon_benchmark = ReconstructionBenchmark(project_folder, sensor) recon_benchmark.load_lidar_gt_transform() From de53d0688e6c33a0b83b76fd525a09290120b9da Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Fri, 27 Sep 2024 00:45:13 +0100 Subject: [PATCH 09/83] refactor: rename and reorganise lidar slam files --- scripts/reconstruction_benchmark/main.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 813ce28..847326a 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -45,7 +45,8 @@ def __init__(self, project_folder, sensor): self.sensor = sensor self.camera_for_alignment = "cam_front" self.image_folder = self.project_folder / "images" - self.individual_clouds_folder = self.project_folder / "lidar_clouds" + self.individual_clouds_folder = self.project_folder / "lidar_slam" / "individual_clouds" + self.lidar_slam_traj_file = self.project_folder / "lidar_slam" / "slam_poses.csv" self.output_folder = self.project_folder / "outputs" self.lidar_output_folder = self.output_folder / "lidar" self.lidar_output_folder.mkdir(exist_ok=True, parents=True) @@ -144,10 +145,9 @@ def run_openmvs(self): ) def compute_sim3(self): - lidar_slam_traj_file = self.project_folder / "slam_poses_robotics.csv" colmap_traj_file = self.colmap_output_folder / "transforms.json" rescaled_colmap_traj_file = self.colmap_output_folder / self.metric_json_filename # TODO refactor - lidar_slam_traj = VilensSlamTrajReader(lidar_slam_traj_file).read_file() + lidar_slam_traj = 
VilensSlamTrajReader(self.lidar_slam_traj_file).read_file() camera_alignment = self.sensor.get_camera(self.camera_for_alignment) valid_folder_path = "images/" + Sensor.convert_camera_topic_to_folder_name(camera_alignment.topic) logger.info(f'Loading only "{self.camera_for_alignment}" with directory "{valid_folder_path}" from json file') From 0b629d5dec334e7de8a25b36225326cb0ca585ac Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Fri, 27 Sep 2024 12:37:16 +0100 Subject: [PATCH 10/83] refactor: save error cloud to benchmark folder --- scripts/reconstruction_benchmark/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 847326a..7e75f97 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -193,7 +193,8 @@ def evaluate_reconstruction(self, input_cloud_path): input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(filtered_input_cloud_path)).points) gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).points) logger.info(get_recon_metrics(input_cloud_np, gt_cloud_np)) - save_error_cloud(input_cloud_np, gt_cloud_np, str(Path(self.project_folder) / "input_error.pcd")) + error_cloud_file = filtered_input_cloud_path.with_name(f"{filtered_input_cloud_path.stem}_error.pcd") + save_error_cloud(input_cloud_np, gt_cloud_np, str(error_cloud_file)) if __name__ == "__main__": From 19317bdd1896977dddc63cf38fa3c28ee7417620 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Mon, 30 Sep 2024 15:15:00 +0100 Subject: [PATCH 11/83] refactor: save lidar occupancy cloud to benchamrk since it filters dynamic objects --- scripts/reconstruction_benchmark/main.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 7e75f97..98bacc1 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -1,4 +1,5 @@ import logging +import shutil from copy import deepcopy from datetime import datetime from pathlib import Path @@ -121,6 +122,7 @@ def process_lidar_clouds(self): f"{Path(lidar_cloud_octomap_file).stem}_occ.pcd" ) convertOctreeToPointCloud(str(lidar_cloud_octomap_file), str(lidar_cloud_free_path), str(lidar_cloud_occ_path)) + shutil.copy(lidar_cloud_occ_path, self.lidar_occ_benchmark_file) logger.info("Merging and downsampling lidar clouds") _ = merge_downsample_vilens_slam_clouds( self.individual_clouds_folder, self.cloud_downsample_voxel_size, self.lidar_cloud_merged_path From 4f2591e165ca9f4f302aa6a3edb1adfe781f1646 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 2 Oct 2024 14:07:58 +0100 Subject: [PATCH 12/83] feat: add keble-4 to download list --- scripts/dataset_download.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/scripts/dataset_download.py b/scripts/dataset_download.py index 3cf1491..2e202a7 100644 --- a/scripts/dataset_download.py +++ b/scripts/dataset_download.py @@ -9,10 +9,18 @@ repo_id = "ori-drs/oxford_spires_dataset" download_sequences = True download_ground_truth = True -dataset_sequences = ["2024-03-13-observatory-quarter-01", "2024-03-14-blenheim-05"] +dataset_sequences = [ + "2024-03-12-keble-college-04", + "2024-03-13-observatory-quarter-01", + "2024-03-14-blenheim-palace-05", +] file_lists = ["images.zip", "lidar_slam.zip", "T_gt_lidar.txt"] -ground_truth_lists = ["observatory-quarter", "blenheim-palace"] +ground_truth_lists = [ + 
"blenheim-palace", + "keble-college", + "observatory-quarter", +] ground_truth_lists = [f"ground_truth_cloud/{site}" for site in ground_truth_lists] cloud_file = "individual_cloud_e57.zip" From a6ca8f1abdbd3517abf903d686931147aeb784a1 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 2 Oct 2024 18:16:14 +0100 Subject: [PATCH 13/83] feat: add chch-02 to download list --- scripts/dataset_download.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/dataset_download.py b/scripts/dataset_download.py index 2e202a7..c8b86b2 100644 --- a/scripts/dataset_download.py +++ b/scripts/dataset_download.py @@ -13,11 +13,13 @@ "2024-03-12-keble-college-04", "2024-03-13-observatory-quarter-01", "2024-03-14-blenheim-palace-05", + "2024-03-18-christ-church-02", ] file_lists = ["images.zip", "lidar_slam.zip", "T_gt_lidar.txt"] ground_truth_lists = [ "blenheim-palace", + "christ-church", "keble-college", "observatory-quarter", ] From e9893879c86d3de0aed783b3b0076c5682504f87 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 2 Oct 2024 18:38:20 +0100 Subject: [PATCH 14/83] refactor: move gt folder outside since each sequence will use the same gt --- scripts/reconstruction_benchmark/main.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 98bacc1..10119c4 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -40,9 +40,10 @@ def setup_logging(): class ReconstructionBenchmark: - def __init__(self, project_folder, sensor): + def __init__(self, project_folder, gt_folder, sensor): self.project_folder = Path(project_folder) self.project_folder.mkdir(parents=True, exist_ok=True) + self.gt_folder = Path(gt_folder) self.sensor = sensor self.camera_for_alignment = "cam_front" self.image_folder = self.project_folder / "images" @@ -60,8 +61,8 @@ def __init__(self, project_folder, sensor): self.cloud_downsample_voxel_size = 0.05 self.gt_octree_path = self.recon_benchmark_dir / "gt_cloud.bt" self.gt_cloud_merged_path = self.recon_benchmark_dir / "gt_cloud_merged.pcd" - self.gt_cloud_individual_e57_folder = self.project_folder / "gt" / "individual_e57_clouds" - self.gt_cloud_individual_pcd_folder = self.project_folder / "gt" / "individual_pcd_clouds" + self.gt_cloud_individual_e57_folder = self.gt_folder / "individual_cloud_e57" + self.gt_cloud_individual_pcd_folder = self.gt_folder / "individual_cloud_pcd" self.colmap_sparse_folder = self.colmap_output_folder / "sparse" / "0" self.openmvs_bin = "/usr/local/bin/OpenMVS" @@ -75,6 +76,7 @@ def __init__(self, project_folder, sensor): logger.info(f"Project folder: {self.project_folder}") self.lidar_cloud_merged_path = self.recon_benchmark_dir / "lidar_cloud_merged.pcd" + self.lidar_occ_benchmark_file = self.recon_benchmark_dir / "lidar_occ.pcd" def process_gt_cloud(self): logger.info("Converting ground truth clouds from e57 to pcd") @@ -206,7 +208,7 @@ def evaluate_reconstruction(self, input_cloud_path): sensor_config = yaml.safe_load(f)["sensor"] sensor = Sensor(**sensor_config) project_folder = "/home/oxford_spires_dataset/data/2024-03-13-observatory-quarter-01" - recon_benchmark = ReconstructionBenchmark(project_folder, sensor) + recon_benchmark = ReconstructionBenchmark(project_folder, gt_folder, sensor) recon_benchmark.load_lidar_gt_transform() recon_benchmark.process_gt_cloud() recon_benchmark.process_lidar_clouds() From 437a9ef63d55ecc1dd051801fa4d6d7614a9ab89 Mon Sep 17 00:00:00 2001 From: Yifu 
Tao Date: Thu, 3 Oct 2024 16:34:50 +0100 Subject: [PATCH 15/83] feat: add sdfstudio install in docker by default we still use nerfstudio --- .docker/Dockerfile | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.docker/Dockerfile b/.docker/Dockerfile index 59be9bd..72de92a 100644 --- a/.docker/Dockerfile +++ b/.docker/Dockerfile @@ -126,6 +126,14 @@ RUN pip install --no-cache-dir --upgrade pip RUN pip install --no-cache-dir torch==2.1.2+cu118 torchvision==0.16.2+cu118 'numpy<2.0.0' --extra-index-url https://download.pytorch.org/whl/cu118 RUN TCNN_CUDA_ARCHITECTURES="${CUDA_ARCHITECTURES}" pip install --no-cache-dir "git+https://github.com/NVlabs/tiny-cuda-nn.git@b3473c81396fe927293bdfd5a6be32df8769927c#subdirectory=bindings/torch" +FROM colmap as sdfstudio +RUN pip install --no-cache-dir --upgrade pip +RUN pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 -f https://download.pytorch.org/whl/torch_stable.html +RUN TCNN_CUDA_ARCHITECTURES="${CUDA_ARCHITECTURES}" pip install --no-cache-dir "git+https://github.com/NVlabs/tiny-cuda-nn.git@b3473c81396fe927293bdfd5a6be32df8769927c#subdirectory=bindings/torch" +RUN git clone https://github.com/autonomousvision/sdfstudio.git ${HOME_DIR}/sdfstudio &&\ + cd ${HOME_DIR}/sdfstudio &&\ + pip install -e . + #################### # Deployment image # #################### From e15aabd27f8644edf871630b80d50b0a527c0c42 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Fri, 4 Oct 2024 11:04:02 +0100 Subject: [PATCH 16/83] feat: support no timestamped nerf json reader --- .../trajectory/file_interfaces/nerf.py | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/oxford_spires_utils/trajectory/file_interfaces/nerf.py b/oxford_spires_utils/trajectory/file_interfaces/nerf.py index ae4d3c1..dd0f360 100644 --- a/oxford_spires_utils/trajectory/file_interfaces/nerf.py +++ b/oxford_spires_utils/trajectory/file_interfaces/nerf.py @@ -30,7 +30,7 @@ def __init__(self, file_path, nerf_reader_valid_folder_path="", nerf_reader_sort self.valid_folder_path = nerf_reader_valid_folder_path self.sort_timestamp = nerf_reader_sort_timestamp - def read_file(self): + def read_file(self, has_timestamp=True): """ Read NeRF trajectory file (transforms.json) @return: PosePath3D from evo @@ -44,21 +44,24 @@ def read_file(self): if self.valid_folder_path != "": if not frame["file_path"].startswith(self.valid_folder_path): continue - t_float128 = NeRFTrajUtils.get_t_float128_from_fname(frame["file_path"]) T = np.array(frame["transform_matrix"]) assert T.shape == (4, 4) assert np.allclose(T[3, :], np.array([0, 0, 0, 1])) - timestamps.append(t_float128) poses_se3.append(T) + if has_timestamp: + t_float128 = NeRFTrajUtils.get_t_float128_from_fname(frame["file_path"]) + timestamps.append(t_float128) - timestamps = np.array(timestamps) poses_se3 = np.array(poses_se3) - if self.sort_timestamp: - sort_idx = np.argsort(timestamps) - timestamps = timestamps[sort_idx] - poses_se3 = poses_se3[sort_idx] - - return evo.core.trajectory.PoseTrajectory3D(poses_se3=poses_se3, timestamps=timestamps) + if has_timestamp: + timestamps = np.array(timestamps) + if self.sort_timestamp: + sort_idx = np.argsort(timestamps) + timestamps = timestamps[sort_idx] + poses_se3 = poses_se3[sort_idx] + return evo.core.trajectory.PoseTrajectory3D(poses_se3=poses_se3, timestamps=timestamps) + + return evo.core.trajectory.PosePath3D(poses_se3=poses_se3) class NeRFTrajWriter(BasicTrajWriter): From 3a2728d3a13bed3d65e12569e866ec5b0fd7da02 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: 
Sat, 5 Oct 2024 16:15:20 +0100 Subject: [PATCH 17/83] refactor: move nerfstudio and sdfstudio install from requirements.txt to docker this make it easier to manage sdfstudio which uses an older version of nerfstudio --- .docker/Dockerfile | 6 ++++++ requirements.txt | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.docker/Dockerfile b/.docker/Dockerfile index 72de92a..2b2bac0 100644 --- a/.docker/Dockerfile +++ b/.docker/Dockerfile @@ -122,16 +122,22 @@ RUN git clone https://github.com/colmap/colmap.git --branch 3.8 ${HOME_DIR}/colm #################### FROM colmap as nerfstudio +ARG NERFSTUDIO_VERSION=v1.1.4 RUN pip install --no-cache-dir --upgrade pip RUN pip install --no-cache-dir torch==2.1.2+cu118 torchvision==0.16.2+cu118 'numpy<2.0.0' --extra-index-url https://download.pytorch.org/whl/cu118 RUN TCNN_CUDA_ARCHITECTURES="${CUDA_ARCHITECTURES}" pip install --no-cache-dir "git+https://github.com/NVlabs/tiny-cuda-nn.git@b3473c81396fe927293bdfd5a6be32df8769927c#subdirectory=bindings/torch" +RUN git clone https://github.com/nerfstudio-project/nerfstudio.git --branch ${NERFSTUDIO_VERSION} ${HOME_DIR}/nerfstudio &&\ + cd ${HOME_DIR}/nerfstudio &&\ + pip install -e . FROM colmap as sdfstudio +ARG SDFSTUDIO_COMMIT=370902a RUN pip install --no-cache-dir --upgrade pip RUN pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 -f https://download.pytorch.org/whl/torch_stable.html RUN TCNN_CUDA_ARCHITECTURES="${CUDA_ARCHITECTURES}" pip install --no-cache-dir "git+https://github.com/NVlabs/tiny-cuda-nn.git@b3473c81396fe927293bdfd5a6be32df8769927c#subdirectory=bindings/torch" RUN git clone https://github.com/autonomousvision/sdfstudio.git ${HOME_DIR}/sdfstudio &&\ cd ${HOME_DIR}/sdfstudio &&\ + git checkout ${SDFSTUDIO_COMMIT} &&\ pip install -e . 
#################### diff --git a/requirements.txt b/requirements.txt index d5488ee..1eb008f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ scipy>=1.10.1 pytest>=8.0.0 pypcd4>=1.1.0 pye57>=0.4.13 -nerfstudio==1.1.4 +# nerfstudio==1.1.4 evo>=1.29.0 pytransform3d>=3.5.0 huggingface_hub>=0.25.1 \ No newline at end of file From dcd7929f1c0c5b65e171f1b92151a400b2db5cc6 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 5 Oct 2024 17:31:37 +0100 Subject: [PATCH 18/83] refactor: use relative symlink this supports opening symlink outisde docker --- scripts/reconstruction_benchmark/nerf.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 120134a..3b18dec 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -1,4 +1,5 @@ import json +import os import sys from pathlib import Path @@ -26,22 +27,24 @@ def generate_nerfstudio_config( def create_nerfstudio_dir(colmap_dir, ns_dir, image_dir): - ns_dir = Path(ns_dir) - colmap_dir = Path(colmap_dir) - image_dir = Path(image_dir) + ns_dir = Path(ns_dir).resolve() + colmap_dir = Path(colmap_dir).resolve() + image_dir = Path(image_dir).resolve() # Ensure ns_dir exists ns_dir.mkdir(parents=True, exist_ok=True) # Symlink image_dir to ns_dir image_symlink = ns_dir / image_dir.name if not image_symlink.exists(): - image_symlink.symlink_to(image_dir) + relative_image_dir = Path(os.path.relpath(str(image_dir.parent), str(ns_dir))) / image_dir.name + image_symlink.symlink_to(relative_image_dir) # Symlink contents of colmap_dir to ns_dir for item in colmap_dir.iterdir(): item_symlink = ns_dir / item.name if not item_symlink.exists(): - item_symlink.symlink_to(item) + relative_item = Path(os.path.relpath(str(colmap_dir), str(ns_dir))) / item.name + item_symlink.symlink_to(relative_item) def update_argv(nerfstudio_config): From 77b2f8f4986134d2ec396741d5fe96da709e6614 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sun, 6 Oct 2024 14:12:41 +0100 Subject: [PATCH 19/83] refactor: move colmap json export to main --- scripts/reconstruction_benchmark/main.py | 14 +++++++++++--- scripts/reconstruction_benchmark/sfm.py | 20 +++++++++----------- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 10119c4..91c3120 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -9,7 +9,7 @@ import yaml from mvs import rescale_openmvs_cloud, run_openmvs, transform_cloud_to_gt_frame from nerf import create_nerfstudio_dir, generate_nerfstudio_config, run_nerfstudio -from sfm import rescale_colmap_json, run_colmap +from sfm import export_json, rescale_colmap_json, run_colmap from oxford_spires_utils.bash_command import print_with_colour from oxford_spires_utils.eval import get_recon_metrics, save_error_cloud @@ -64,7 +64,8 @@ def __init__(self, project_folder, gt_folder, sensor): self.gt_cloud_individual_e57_folder = self.gt_folder / "individual_cloud_e57" self.gt_cloud_individual_pcd_folder = self.gt_folder / "individual_cloud_pcd" - self.colmap_sparse_folder = self.colmap_output_folder / "sparse" / "0" + self.colmap_sparse_folder = self.colmap_output_folder / "sparse" + self.colmap_sparse_0_folder = self.colmap_sparse_folder / "0" self.openmvs_bin = "/usr/local/bin/OpenMVS" self.mvs_output_folder = self.output_folder / "mvs" 
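# Aside, not part of the patch above: the relative-symlink change in PATCH 18
# computes link targets with os.path.relpath so that the nerfstudio folder stays
# valid when browsed outside the Docker mount, where absolute container paths do
# not exist. A minimal sketch of the same idea, using hypothetical paths rather
# than the repository's actual layout:
import os
from pathlib import Path

ns_dir = Path("outputs/nerfstudio/sequence").resolve()  # directory that holds the symlink
image_dir = Path("images").resolve()                    # directory the symlink should point to
ns_dir.mkdir(parents=True, exist_ok=True)
link = ns_dir / image_dir.name
# relpath yields e.g. "../../../images", so the link resolves under any mount prefix
relative_target = Path(os.path.relpath(image_dir.parent, ns_dir)) / image_dir.name
if not link.exists():
    link.symlink_to(relative_target)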
self.mvs_output_folder.mkdir(exist_ok=True, parents=True) @@ -131,7 +132,14 @@ def process_lidar_clouds(self): ) def run_colmap(self, matcher="vocab_tree_matcher"): - run_colmap(self.image_folder, self.colmap_output_folder, matcher=matcher) + camera_model = "OPENCV_FISHEYE" + run_colmap(self.image_folder, self.colmap_output_folder, matcher=matcher, camera_model=camera_model) + export_json( + self.colmap_sparse_0_folder, + json_file_name="transforms.json", + output_dir=self.colmap_output_folder, + camera_model=camera_model, + ) create_nerfstudio_dir(self.colmap_output_folder, self.ns_data_dir, self.image_folder) def run_openmvs(self): diff --git a/scripts/reconstruction_benchmark/sfm.py b/scripts/reconstruction_benchmark/sfm.py index 17f8876..71962dc 100644 --- a/scripts/reconstruction_benchmark/sfm.py +++ b/scripts/reconstruction_benchmark/sfm.py @@ -69,9 +69,6 @@ def run_colmap( colmap_feature_extractor_cmd = " ".join(colmap_feature_extractor_cmd) logger.info(f"Running {colmap_feature_extractor_cmd}") run_command(colmap_feature_extractor_cmd, print_command=False) - colmap_db = COLMAPDatabase.connect(database_path) - total_image_num = colmap_db.execute("SELECT COUNT(*) FROM images").fetchone()[0] - logger.debug(f"Total number of images in COLMAP database: {total_image_num}") image_num = len(list(image_path.rglob("*"))) colmap_matcher_cmd = [ @@ -116,13 +113,6 @@ def run_colmap( # from nerfstudio.process_data.colmap_utils import colmap_to_json # num_image_matched = colmap_to_json(recon_dir=sparse_0_path, output_dir=output_path) - logger.info("Exporting COLMAP to json file") - num_frame_matched = export_json( - sparse_0_path, json_file_name="transforms.json", output_dir=output_path, camera_model=camera_model - ) - logger.info( - f"COLMAP matched {num_frame_matched} / {total_image_num} images {num_frame_matched / total_image_num * 100:.2f}%" - ) def rescale_colmap_json(json_file, sim3_matrix, output_file): @@ -150,10 +140,12 @@ def rescale_colmap_json(json_file, sim3_matrix, output_file): def export_json(input_bin_dir=None, json_file_name="transforms.json", output_dir=None, camera_model="OPENCV_FISHEYE"): + logger.info("Exporting COLMAP to json file") camera_mask_path = None input_bin_dir = Path(input_bin_dir) cameras_path = input_bin_dir / "cameras.bin" images_path = input_bin_dir / "images.bin" + database_path = output_dir / "database.db" output_dir = input_bin_dir if output_dir is None else Path(output_dir) cameras = read_cameras_binary(cameras_path) @@ -181,12 +173,18 @@ def export_json(input_bin_dir=None, json_file_name="transforms.json", output_dir out = {} out["camera_model"] = camera_model out["frames"] = frames + num_frame_matched = len(frames) + + colmap_db = COLMAPDatabase.connect(database_path) + total_image_num = colmap_db.execute("SELECT COUNT(*) FROM images").fetchone()[0] + logger.info( + f"COLMAP matched {num_frame_matched} / {total_image_num} images {num_frame_matched / total_image_num * 100:.2f}%" + ) # Save for scale adjustment later assert json_file_name[-5:] == ".json" with open(output_dir / json_file_name, "w", encoding="utf-8") as f: json.dump(out, f, indent=4) - return len(frames) def generate_json_camera_data(camera, camera_model): From e899bf4c5faf8fae09383db08d963fe8643b9124 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sun, 6 Oct 2024 14:13:13 +0100 Subject: [PATCH 20/83] fix: correct sparse folder for openmvs --- scripts/reconstruction_benchmark/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py 
b/scripts/reconstruction_benchmark/main.py index 91c3120..23b23c9 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -150,7 +150,7 @@ def run_openmvs(self): run_openmvs( self.image_folder, self.colmap_output_folder, - self.colmap_sparse_folder, + self.colmap_sparse_0_folder, self.mvs_output_folder, self.mvs_max_image_size, self.openmvs_bin, From 3e4ce6c6e784c6cba5f4c595b864b377fb0debca Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sun, 6 Oct 2024 14:30:24 +0100 Subject: [PATCH 21/83] refactor: move colmap undistorter from mvs into sfm --- scripts/reconstruction_benchmark/main.py | 11 ++++++++--- scripts/reconstruction_benchmark/mvs.py | 15 +-------------- scripts/reconstruction_benchmark/sfm.py | 19 ++++++++++++++++++- 3 files changed, 27 insertions(+), 18 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 23b23c9..06331a9 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -69,7 +69,7 @@ def __init__(self, project_folder, gt_folder, sensor): self.openmvs_bin = "/usr/local/bin/OpenMVS" self.mvs_output_folder = self.output_folder / "mvs" self.mvs_output_folder.mkdir(exist_ok=True, parents=True) - self.mvs_max_image_size = 600 + self.colmap_undistort_max_image_size = 600 self.ns_data_dir = self.output_folder / "nerfstudio" / self.project_folder.name self.metric_json_filename = "transforms_metric.json" @@ -133,7 +133,13 @@ def process_lidar_clouds(self): def run_colmap(self, matcher="vocab_tree_matcher"): camera_model = "OPENCV_FISHEYE" - run_colmap(self.image_folder, self.colmap_output_folder, matcher=matcher, camera_model=camera_model) + run_colmap( + self.image_folder, + self.colmap_output_folder, + matcher=matcher, + camera_model=camera_model, + max_image_size=self.colmap_undistort_max_image_size, + ) export_json( self.colmap_sparse_0_folder, json_file_name="transforms.json", @@ -152,7 +158,6 @@ def run_openmvs(self): self.colmap_output_folder, self.colmap_sparse_0_folder, self.mvs_output_folder, - self.mvs_max_image_size, self.openmvs_bin, ) diff --git a/scripts/reconstruction_benchmark/mvs.py b/scripts/reconstruction_benchmark/mvs.py index 34dd1fb..86e9c87 100644 --- a/scripts/reconstruction_benchmark/mvs.py +++ b/scripts/reconstruction_benchmark/mvs.py @@ -52,24 +52,11 @@ def run_colmap_mvs(image_path, colmap_output_path, sparse_folder, max_image_size run_command(colmap_delauany_mesh_filter_cmd, print_command=True) -def run_openmvs( - image_path, colmap_output_path, sparse_folder, mvs_dir, max_image_size, openmvs_bin="/usr/local/bin/OpenMVS" -): +def run_openmvs(image_path, colmap_output_path, sparse_folder, mvs_dir, openmvs_bin="/usr/local/bin/OpenMVS"): logger.info(f"Running OpenMVS; img_path {image_path}; output: {mvs_dir}") colmap_output_path = Path(colmap_output_path) mvs_dir.mkdir(parents=True, exist_ok=True) - colmap_image_undistorter_cmd = [ - "colmap image_undistorter", - f"--image_path {image_path}", - f"--input_path {sparse_folder}", - f"--output_path {colmap_output_path/'dense'}", - "--output_type COLMAP", - f"--max_image_size {max_image_size}", - ] - colmap_image_undistorter_cmd = " ".join(colmap_image_undistorter_cmd) - run_command(colmap_image_undistorter_cmd, print_command=True) - # Export to openMVS export_cmd = [ f"{openmvs_bin}/InterfaceCOLMAP", diff --git a/scripts/reconstruction_benchmark/sfm.py b/scripts/reconstruction_benchmark/sfm.py index 71962dc..4602bca 100644 --- 
a/scripts/reconstruction_benchmark/sfm.py +++ b/scripts/reconstruction_benchmark/sfm.py @@ -50,7 +50,12 @@ def get_vocab_tree(image_num) -> Path: def run_colmap( - image_path, output_path, camera_model="OPENCV_FISHEYE", matcher="vocab_tree_matcher", loop_detection_period=10 + image_path, + output_path, + camera_model="OPENCV_FISHEYE", + matcher="vocab_tree_matcher", + loop_detection_period=10, + max_image_size=1000, ): logger.debug(f"Running colmap; img_path {image_path}; output: {output_path}, {camera_model}") assert camera_model in camera_model_list, f"{camera_model} not supported. Supported models: {camera_model_list}" @@ -111,6 +116,18 @@ def run_colmap( logger.info(f"Running {colmap_ba_cmd}") run_command(colmap_ba_cmd, print_command=False) + colmap_image_undistorter_cmd = [ + "colmap image_undistorter", + f"--image_path {image_path}", + f"--input_path {sparse_0_path}", + f"--output_path {output_path/'dense'}", + "--output_type COLMAP", + f"--max_image_size {max_image_size}", + ] + colmap_image_undistorter_cmd = " ".join(colmap_image_undistorter_cmd) + logger.info(f"Running {colmap_image_undistorter_cmd}") + run_command(colmap_image_undistorter_cmd, print_command=False) + # from nerfstudio.process_data.colmap_utils import colmap_to_json # num_image_matched = colmap_to_json(recon_dir=sparse_0_path, output_dir=output_path) From 8172429d3b59e9313213e7e1875c77d218350973 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sun, 6 Oct 2024 14:50:39 +0100 Subject: [PATCH 22/83] refactor: get cam model from colmap bin --- scripts/reconstruction_benchmark/main.py | 1 - scripts/reconstruction_benchmark/sfm.py | 11 ++++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 06331a9..5202f6f 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -144,7 +144,6 @@ def run_colmap(self, matcher="vocab_tree_matcher"): self.colmap_sparse_0_folder, json_file_name="transforms.json", output_dir=self.colmap_output_folder, - camera_model=camera_model, ) create_nerfstudio_dir(self.colmap_output_folder, self.ns_data_dir, self.image_folder) diff --git a/scripts/reconstruction_benchmark/sfm.py b/scripts/reconstruction_benchmark/sfm.py index 4602bca..be3d122 100644 --- a/scripts/reconstruction_benchmark/sfm.py +++ b/scripts/reconstruction_benchmark/sfm.py @@ -156,7 +156,7 @@ def rescale_colmap_json(json_file, sim3_matrix, output_file): json.dump(data, f, indent=2) -def export_json(input_bin_dir=None, json_file_name="transforms.json", output_dir=None, camera_model="OPENCV_FISHEYE"): +def export_json(input_bin_dir, json_file_name="transforms.json", output_dir=None): logger.info("Exporting COLMAP to json file") camera_mask_path = None input_bin_dir = Path(input_bin_dir) @@ -178,7 +178,7 @@ def export_json(input_bin_dir=None, json_file_name="transforms.json", output_dir c2w = np.linalg.inv(w2c) # this is the coordinate for openMVS c2w = get_nerf_pose(c2w) - frame = generate_json_camera_data(camera, camera_model) + frame = generate_json_camera_data(camera) frame["file_path"] = Path(f"./images/{im_data.name}").as_posix() # assume images not in image path in colmap frame["transform_matrix"] = c2w.tolist() frame["colmap_img_id"] = img_id @@ -188,7 +188,7 @@ def export_json(input_bin_dir=None, json_file_name="transforms.json", output_dir frames.append(frame) out = {} - out["camera_model"] = camera_model + out["camera_model"] = camera.model out["frames"] = frames 
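# Aside, not part of the patch above: get_nerf_pose, used in export_json, is not
# shown in this series. COLMAP expresses camera poses in an OpenCV-style camera
# frame (x right, y down, z forward), while NeRF-style tools expect OpenGL-style
# axes (x right, y up, z backward). The conventional conversion flips the camera
# y and z axes of the camera-to-world matrix; the helper below is an illustrative
# assumption of that step, not necessarily the repository's exact implementation.
import numpy as np

def opencv_to_opengl_pose(c2w_opencv: np.ndarray) -> np.ndarray:
    c2w = c2w_opencv.copy()
    c2w[0:3, 1:3] *= -1.0  # negate the columns holding the camera y and z axes
    return c2w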
num_frame_matched = len(frames) @@ -204,8 +204,9 @@ def export_json(input_bin_dir=None, json_file_name="transforms.json", output_dir json.dump(out, f, indent=4) -def generate_json_camera_data(camera, camera_model): - assert camera_model in ["OPENCV_FISHEYE", "OPENCV"] +def generate_json_camera_data(camera): + camera_model = camera.model + assert camera_model in ["OPENCV_FISHEYE", "OPENCV", "PINHOLE"] data = { "fl_x": float(camera.params[0]), "fl_y": float(camera.params[1]), From 0381ebe79620a9a3484a50bbc40dd487425ac063 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sun, 6 Oct 2024 14:59:06 +0100 Subject: [PATCH 23/83] feat: save undistorted json --- scripts/reconstruction_benchmark/main.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 5202f6f..1376197 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -145,6 +145,11 @@ def run_colmap(self, matcher="vocab_tree_matcher"): json_file_name="transforms.json", output_dir=self.colmap_output_folder, ) + export_json( + input_bin_dir=self.colmap_output_folder / "dense" / "sparse", + json_file_name="transforms_undistorted.json", + output_dir=self.colmap_output_folder, + ) create_nerfstudio_dir(self.colmap_output_folder, self.ns_data_dir, self.image_folder) def run_openmvs(self): From 14df73cfdebda61d82148b9dd0aa74175ba60eca Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sun, 6 Oct 2024 16:49:11 +0100 Subject: [PATCH 24/83] feat: export json and ns dir for undistorted images --- scripts/reconstruction_benchmark/main.py | 10 ++++++++-- scripts/reconstruction_benchmark/sfm.py | 4 ++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 1376197..2b1329a 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -147,10 +147,16 @@ def run_colmap(self, matcher="vocab_tree_matcher"): ) export_json( input_bin_dir=self.colmap_output_folder / "dense" / "sparse", - json_file_name="transforms_undistorted.json", - output_dir=self.colmap_output_folder, + json_file_name="transforms.json", + output_dir=self.colmap_output_folder / "dense", + db_file=self.colmap_output_folder / "database.db", ) create_nerfstudio_dir(self.colmap_output_folder, self.ns_data_dir, self.image_folder) + create_nerfstudio_dir( + self.colmap_output_folder / "dense", + self.ns_data_dir.with_name(self.ns_data_dir.name + "_undistorted"), + self.ns_data_dir / "dense" / self.image_folder.name, + ) def run_openmvs(self): # check if multiple sparse folders exist diff --git a/scripts/reconstruction_benchmark/sfm.py b/scripts/reconstruction_benchmark/sfm.py index be3d122..d335e56 100644 --- a/scripts/reconstruction_benchmark/sfm.py +++ b/scripts/reconstruction_benchmark/sfm.py @@ -156,13 +156,13 @@ def rescale_colmap_json(json_file, sim3_matrix, output_file): json.dump(data, f, indent=2) -def export_json(input_bin_dir, json_file_name="transforms.json", output_dir=None): +def export_json(input_bin_dir, json_file_name="transforms.json", output_dir=None, db_file=None): logger.info("Exporting COLMAP to json file") camera_mask_path = None input_bin_dir = Path(input_bin_dir) cameras_path = input_bin_dir / "cameras.bin" images_path = input_bin_dir / "images.bin" - database_path = output_dir / "database.db" + database_path = output_dir / "database.db" if db_file is None else Path(db_file) output_dir = input_bin_dir if 
output_dir is None else Path(output_dir) cameras = read_cameras_binary(cameras_path) From f631a56a7c566a32b95274edab093fe2e6d69696 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sun, 6 Oct 2024 16:50:50 +0100 Subject: [PATCH 25/83] feat: export json and ns_dir for undistorted colmap images --- scripts/reconstruction_benchmark/main.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 2b1329a..ad1a221 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -73,7 +73,6 @@ def __init__(self, project_folder, gt_folder, sensor): self.ns_data_dir = self.output_folder / "nerfstudio" / self.project_folder.name self.metric_json_filename = "transforms_metric.json" - self.ns_model_dir = self.ns_data_dir / "trained_models" logger.info(f"Project folder: {self.project_folder}") self.lidar_cloud_merged_path = self.recon_benchmark_dir / "lidar_cloud_merged.pcd" @@ -204,9 +203,11 @@ def compute_sim3(self): if not ns_metric_json_file.exists(): ns_metric_json_file.symlink_to(rescaled_colmap_traj_file) # TODO remove old ones? - def run_nerfstudio(self, method="nerfacto", json_filename="transforms_metric.json"): - assert self.ns_data_dir.exists(), f"nerfstudio directory not found at {self.ns_data_dir}" - ns_config = generate_nerfstudio_config(method, self.ns_data_dir / json_filename, self.ns_model_dir) + def run_nerfstudio(self, method="nerfacto", ns_data_dir=None, json_filename="transforms_metric.json"): + ns_data_dir = self.ns_data_dir if ns_data_dir is None else Path(ns_data_dir) + ns_model_dir = ns_data_dir / "trained_models" + assert ns_data_dir.exists(), f"nerfstudio directory not found at {ns_data_dir}" + ns_config = generate_nerfstudio_config(method, ns_data_dir / json_filename, ns_model_dir) final_cloud_file = run_nerfstudio(ns_config) final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) @@ -240,5 +241,7 @@ def evaluate_reconstruction(self, input_cloud_path): recon_benchmark.run_openmvs() recon_benchmark.compute_sim3() recon_benchmark.evaluate_reconstruction(recon_benchmark.scaled_mvs_cloud_gt_frame_file) + undistorted_ns_dir = recon_benchmark.ns_data_dir.with_name(recon_benchmark.ns_data_dir.name + "_undistorted") + # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms.json", ns_data_dir=undistorted_ns_dir) recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_metric.json") recon_benchmark.run_nerfstudio("splatfacto") From 2f083b485e9ee44bb7391c012d21b5a788079beb Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Mon, 7 Oct 2024 11:47:03 +0100 Subject: [PATCH 26/83] feat: add ns-eval --- scripts/reconstruction_benchmark/nerf.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 3b18dec..17aa178 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -5,6 +5,7 @@ import numpy as np import open3d as o3d +from nerfstudio.scripts.eval import entrypoint as eval_entrypoint from nerfstudio.scripts.exporter import entrypoint as exporter_entrypoint from nerfstudio.scripts.train import entrypoint as train_entrypoint @@ -67,11 +68,16 @@ def run_nerfstudio(ns_config): output_log_dir = Path(ns_config["output-dir"]) / folder_name / ns_config["method"] lastest_output_folder = sorted([x for x in output_log_dir.glob("*") if 
x.is_dir()])[-1] latest_output_config = lastest_output_folder / "config.yml" + + # evaluate renders + render_dir = lastest_output_folder / "renders" + run_nerfstudio_eval(latest_output_config, render_dir) + + # export cloud export_method = "gaussian-splat" if ns_config["method"] == "splatfacto" else "pointcloud" output_cloud_file = run_nerfstudio_exporter(latest_output_config, export_method) ns_se3, scale_matrix = load_ns_transform(lastest_output_folder) cloud = o3d.io.read_point_cloud(str(output_cloud_file)) - cloud.transform(scale_matrix) cloud.transform(np.linalg.inv(ns_se3)) final_metric_cloud_file = output_cloud_file.with_name(f'{ns_config["method"]}_cloud_metric.ply') @@ -79,6 +85,18 @@ def run_nerfstudio(ns_config): return final_metric_cloud_file +def run_nerfstudio_eval(config_file, render_dir): + output_eval_file = config_file.parent / "eval_results.json" + eval_config = { + "load-config": config_file, + "output-path": output_eval_file, + "render-output-path": render_dir, + } + update_argv(eval_config) + eval_entrypoint() + sys.argv = [sys.argv[0]] + + def run_nerfstudio_exporter(config_file, export_method): exporter_config = { "method": export_method, From 712b3a71588589b5326ecda843e82b4b44541fa0 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Mon, 7 Oct 2024 11:47:36 +0100 Subject: [PATCH 27/83] fix: change folder name for nerfacto-big/huge and splatfacto-big --- scripts/reconstruction_benchmark/nerf.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 17aa178..d7e6732 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -65,7 +65,9 @@ def run_nerfstudio(ns_config): sys.argv = [sys.argv[0]] ns_data = Path(ns_config["data"]) folder_name = ns_data.name if ns_data.is_dir() else ns_data.parent.name - output_log_dir = Path(ns_config["output-dir"]) / folder_name / ns_config["method"] + # rename nerfacto-big or nerfacto-huge to nerfacto, splatfacto-big to splatfacto + method_dir_name = ns_config["method"].replace("-big", "").replace("-huge", "") + output_log_dir = Path(ns_config["output-dir"]) / folder_name / method_dir_name lastest_output_folder = sorted([x for x in output_log_dir.glob("*") if x.is_dir()])[-1] latest_output_config = lastest_output_folder / "config.yml" From f23e809bd8c7f674cfc097e37d25b151b1fefbef Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Mon, 7 Oct 2024 11:49:07 +0100 Subject: [PATCH 28/83] feat: log the nerf eval results --- scripts/reconstruction_benchmark/nerf.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index d7e6732..28060fa 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -1,4 +1,5 @@ import json +import logging import os import sys from pathlib import Path @@ -11,6 +12,8 @@ from oxford_spires_utils.bash_command import print_with_colour +logger = logging.getLogger(__name__) + def generate_nerfstudio_config( method, data_dir, output_dir, iterations=30000, eval_step=500, vis="wandb", cam_opt_mode="off" @@ -96,6 +99,7 @@ def run_nerfstudio_eval(config_file, render_dir): } update_argv(eval_config) eval_entrypoint() + logger.info(f"Eval results: {json.load(output_eval_file.open())}") sys.argv = [sys.argv[0]] From c041fb04ace82a543cb1ad8629d93e91b2f04fcd Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Mon, 7 Oct 2024 15:41:21 +0100 Subject: [PATCH 29/83] 
refactor: remove debug logs from nerfstudio and add info logs --- scripts/reconstruction_benchmark/nerf.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 28060fa..8d0d0a6 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -63,6 +63,8 @@ def update_argv(nerfstudio_config): def run_nerfstudio(ns_config): + logger.info(f"Running '{ns_config['method']}' on {ns_config['data']}") + logging.disable(logging.DEBUG) update_argv(ns_config) train_entrypoint() sys.argv = [sys.argv[0]] @@ -75,8 +77,10 @@ def run_nerfstudio(ns_config): latest_output_config = lastest_output_folder / "config.yml" # evaluate renders + logger.info(f"Evaluating from {lastest_output_folder}") render_dir = lastest_output_folder / "renders" run_nerfstudio_eval(latest_output_config, render_dir) + logging.disable(logging.NOTSET) # export cloud export_method = "gaussian-splat" if ns_config["method"] == "splatfacto" else "pointcloud" @@ -99,7 +103,7 @@ def run_nerfstudio_eval(config_file, render_dir): } update_argv(eval_config) eval_entrypoint() - logger.info(f"Eval results: {json.load(output_eval_file.open())}") + logger.info(f"Nerfstudio eval results\n{json.load(output_eval_file.open())}") sys.argv = [sys.argv[0]] From 636bcd2b97a83688d8b04ab2bf02546686ea8fd8 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 8 Oct 2024 11:40:12 +0100 Subject: [PATCH 30/83] refactor: move pose ply saving before traj loading --- scripts/reconstruction_benchmark/main.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index ad1a221..034bcdf 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -174,6 +174,7 @@ def compute_sim3(self): colmap_traj_file = self.colmap_output_folder / "transforms.json" rescaled_colmap_traj_file = self.colmap_output_folder / self.metric_json_filename # TODO refactor lidar_slam_traj = VilensSlamTrajReader(self.lidar_slam_traj_file).read_file() + pose_to_ply(lidar_slam_traj, self.colmap_output_folder / "lidar_slam_traj.ply", [1.0, 0.0, 0.0]) camera_alignment = self.sensor.get_camera(self.camera_for_alignment) valid_folder_path = "images/" + Sensor.convert_camera_topic_to_folder_name(camera_alignment.topic) logger.info(f'Loading only "{self.camera_for_alignment}" with directory "{valid_folder_path}" from json file') @@ -189,6 +190,8 @@ def compute_sim3(self): # T_lidar_colmap = align(lidar_slam_traj, colmap_traj_single_cam, self.colmap_output_folder) T_lidar_colmap = align(lidar_slam_traj_cam_frame, colmap_traj_single_cam, self.colmap_output_folder) rescale_colmap_json(colmap_traj_file, T_lidar_colmap, rescaled_colmap_traj_file) + rescaled_colmap_traj = NeRFTrajReader(rescaled_colmap_traj_file).read_file() + pose_to_ply(rescaled_colmap_traj, self.colmap_output_folder / "rescaled_colmap_traj.ply", [0.0, 1.0, 0.0]) mvs_cloud_file = self.mvs_output_folder / "scene_dense_nerf_world.ply" self.scaled_mvs_cloud_file = self.mvs_output_folder / "OpenMVS_dense_cloud_metric.pcd" rescale_openmvs_cloud(mvs_cloud_file, T_lidar_colmap, self.scaled_mvs_cloud_file) @@ -196,9 +199,6 @@ def compute_sim3(self): transform_cloud_to_gt_frame( self.scaled_mvs_cloud_file, self.transform_matrix, self.scaled_mvs_cloud_gt_frame_file ) - rescaled_colmap_traj = NeRFTrajReader(rescaled_colmap_traj_file).read_file() - 
pose_to_ply(rescaled_colmap_traj, self.colmap_output_folder / "rescaled_colmap_traj.ply", [0.0, 1.0, 0.0]) - pose_to_ply(lidar_slam_traj, self.colmap_output_folder / "lidar_slam_traj.ply", [1.0, 0.0, 0.0]) ns_metric_json_file = self.ns_data_dir / self.metric_json_filename if not ns_metric_json_file.exists(): ns_metric_json_file.symlink_to(rescaled_colmap_traj_file) # TODO remove old ones? From a4308bfd70372b9dcfd5c30febcea236fad8699d Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 8 Oct 2024 18:27:00 +0100 Subject: [PATCH 31/83] feat: copy json handler from ndp without submap --- oxford_spires_utils/trajectory/nerf_json.py | 194 ++++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 oxford_spires_utils/trajectory/nerf_json.py diff --git a/oxford_spires_utils/trajectory/nerf_json.py b/oxford_spires_utils/trajectory/nerf_json.py new file mode 100644 index 0000000..00e02f8 --- /dev/null +++ b/oxford_spires_utils/trajectory/nerf_json.py @@ -0,0 +1,194 @@ +import json +import shutil +from pathlib import Path + + +class JsonHandler: + # remember to create a copy of the json when removing, + # otherwise frames will be skipped + def __init__(self, input_json_path) -> None: + self.load_json(input_json_path) + + def load_json(self, json_path): + with open(json_path, "r") as f: + self.traj = json.load(f) + + def save_json(self, json_path): + with open(json_path, "w") as f: + json.dump(self.traj, f, indent=4) + + def get_n_frames(self): + return len(self.traj["frames"]) + + def sync_with_folder(self, folder_path, valid_ext=".jpg"): + # get all files in subfolder + ref_files = list(Path(folder_path).glob("**/*" + valid_ext)) + print(f"{len(self.traj['frames'])} files in json") + + count = 0 + frames_copy = self.traj["frames"].copy() + for frame in self.traj["frames"]: + file_path = frame["file_path"] + exist = [file_path for ref_file in ref_files if file_path == ref_file.__str__()[-len(file_path) :]] + if len(exist) == 0: + # print(f"file {file_path} not exist, remove it from json") + frames_copy.remove(frame) + count += 1 + print(f"{len(ref_files)} files in reference folder") + print(f"removed {count} files, {len(frames_copy)} left") + self.traj["frames"] = frames_copy + + def remove_folder(self, folder_path): + frames_copy = self.traj["frames"].copy() + for frame in frames_copy: + file_path = Path(frame["file_path"]) + if file_path.parent == Path(folder_path): + self.traj["frames"].remove(frame) + print(f"removed {file_path} from json") + print(f"filter_folder {len(frames_copy)} files in json, {len(self.traj['frames'])} left") + + def keep_folder(self, folder_path): + frames_copy = self.traj["frames"].copy() + for frame in frames_copy: + file_path = Path(frame["file_path"]) + if file_path.parent != Path(folder_path): + self.traj["frames"].remove(frame) + print(f"removed {file_path} from json") + print(f"filter_folder {len(frames_copy)} files in json, {len(self.traj['frames'])} left") + + def keep_timestamp_only(self, start_time, end_time): + frames_copy = self.traj["frames"].copy() + for frame in frames_copy: + file_path = Path(frame["file_path"]) + timestamp = float(file_path.stem) + if timestamp < start_time or timestamp > end_time: + self.traj["frames"].remove(frame) + print(f"removed {file_path} from json") + print(f"keep_timestamp_only {len(frames_copy)} files in json, {len(self.traj['frames'])} left") + + def remove_timestamp(self, start_time, end_time): + frames_copy = self.traj["frames"].copy() + for frame in frames_copy: + file_path = Path(frame["file_path"]) + 
timestamp = float(file_path.stem) + if timestamp >= start_time and timestamp <= end_time: + self.traj["frames"].remove(frame) + print(f"removed {file_path} from json") + print(f"remove_timestamp {len(frames_copy)} files in json, {len(self.traj['frames'])} left") + + def skip_frames(self, skip): + frames_copy = self.traj["frames"].copy() + # sort + # frames_copy.sort(key=lambda x: float(Path(x["file_path"]).stem)) + + self.traj["frames"] = frames_copy[::skip] + print(f"Skipping: {len(frames_copy)} files in json, {len(self.traj['frames'])} left") + + def remove_intrinsics(self): + # frames_copy = self.traj["frames"].copy() + for frame in self.traj["frames"]: + frame.pop("fl_x") + frame.pop("fl_y") + frame.pop("cx") + frame.pop("cy") + frame.pop("k1") + frame.pop("k2") + frame.pop("k3") + frame.pop("k4") + frame.pop("h") + frame.pop("w") + + def add_depth(self, depth_folder=None): + frames_copy = self.traj["frames"].copy() + for frame in frames_copy: + if depth_folder is None: + # simply add a path to the depth file modified from the image file path + depth_file_path = frame["file_path"].replace("images", "depth").replace(".jpg", ".png") + frame["depth_file_path"] = depth_file_path + else: + # add if it exists, otherwise remove + depth_folder_stem = Path(depth_folder).stem + depth_file_path = frame["file_path"].replace("images", depth_folder_stem).replace(".jpg", ".png") + depth_file_path_full = Path(depth_folder).parent / depth_file_path + if depth_file_path_full.exists(): + frame["depth_file_path"] = depth_file_path.__str__() + else: + print(f"{depth_file_path_full} not exist") + self.traj["frames"].remove(frame) + + def add_normal(self, normal_folder=None): + frames_copy = self.traj["frames"].copy() + for frame in frames_copy: + if normal_folder is None: + # simply add a path to the depth file modified from the image file path + normal_file_path = frame["file_path"].replace("images", "normal").replace(".jpg", ".png") + frame["normal_file_path"] = normal_file_path + else: + # only add if it exists, otherwise remove + normal_folder_stem = Path(normal_folder).stem + normal_file_path = frame["file_path"].replace("images", normal_folder_stem).replace(".jpg", ".png") + normal_file_path_full = Path(normal_folder).parent / normal_file_path + if normal_file_path_full.exists(): + frame["normal_file_path"] = normal_file_path.__str__() + else: + print(f"{normal_file_path_full} not exist") + self.traj["frames"].remove(frame) + + def add_mask(self): + for frame in self.traj["frames"]: + frame["mask_path"] = frame["file_path"].replace("images", "masks") + + def get_clouds_in_json(self, cloud_dir, output_dir): + cloud_dir = Path(cloud_dir) + output_dir = Path(output_dir) + if output_dir.exists(): + shutil.rmtree(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + for frame in self.traj["frames"]: + # find / in file_path + img_path = Path(frame["file_path"]) + + cloud_path = cloud_dir / img_path.parent.name / img_path.name.replace(".jpg", ".pcd") + output_path = output_dir / img_path.parent.name / img_path.name.replace(".jpg", ".pcd") + # remove if exist + output_path.parent.mkdir(parents=True, exist_ok=True) + if cloud_path.exists(): + shutil.copy(cloud_path, output_dir / cloud_path.parent.name / cloud_path.name) + else: + test_path = cloud_path.parent / (cloud_path.stem[:-1] + cloud_path.suffix) + if test_path.exists(): + shutil.copy(test_path, output_dir / cloud_path.parent.name / test_path.name) + else: + print(f"{cloud_path} not exist") + + def get_images_in_json(self, image_dir, 
output_dir): + image_dir = Path(image_dir) + output_dir = Path(output_dir) + if output_dir.exists(): + shutil.rmtree(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + for frame in self.traj["frames"]: + # find / in file_path + img_path = image_dir / frame["file_path"] + assert img_path.exists(), f"{img_path} not exist" + + output_path = output_dir / img_path.parent.name / img_path.name + output_path.parent.mkdir(parents=True, exist_ok=True) + if img_path.exists(): + shutil.copy(img_path, output_dir / img_path.parent.name / img_path.name) + else: + print(f"{img_path} not exist") + + def update_hw(self): + for frame in self.traj["frames"]: + frame["h"] = 935 + # frame["w"] = w + + def write_pose_cloud(self, output_file): + import open3d as o3d + + output_cloud = o3d.geometry.PointCloud() + for frame in self.traj["frames"]: + xyz = get_xyz(frame) + output_cloud.points.append(xyz) + o3d.io.write_point_cloud(str(output_file), output_cloud) From 84e1a04cedf7419cf9d971284b2a620ae3136f6a Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 8 Oct 2024 18:31:16 +0100 Subject: [PATCH 32/83] refactor: rename to NeRFJsonHandler --- .../trajectory/{nerf_json.py => nerf_json_handler.py} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename oxford_spires_utils/trajectory/{nerf_json.py => nerf_json_handler.py} (99%) diff --git a/oxford_spires_utils/trajectory/nerf_json.py b/oxford_spires_utils/trajectory/nerf_json_handler.py similarity index 99% rename from oxford_spires_utils/trajectory/nerf_json.py rename to oxford_spires_utils/trajectory/nerf_json_handler.py index 00e02f8..c9907bf 100644 --- a/oxford_spires_utils/trajectory/nerf_json.py +++ b/oxford_spires_utils/trajectory/nerf_json_handler.py @@ -3,7 +3,7 @@ from pathlib import Path -class JsonHandler: +class NeRFJsonHandler: # remember to create a copy of the json when removing, # otherwise frames will be skipped def __init__(self, input_json_path) -> None: From de82bfb15f07d6aaae8dfcaeea62d264c1b50921 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 9 Oct 2024 15:41:32 +0100 Subject: [PATCH 33/83] feat: rename json for train/eval split --- .../trajectory/nerf_json_handler.py | 15 ++++++++ scripts/reconstruction_benchmark/nerf_json.py | 35 +++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 scripts/reconstruction_benchmark/nerf_json.py diff --git a/oxford_spires_utils/trajectory/nerf_json_handler.py b/oxford_spires_utils/trajectory/nerf_json_handler.py index c9907bf..ca18246 100644 --- a/oxford_spires_utils/trajectory/nerf_json_handler.py +++ b/oxford_spires_utils/trajectory/nerf_json_handler.py @@ -56,6 +56,21 @@ def keep_folder(self, folder_path): print(f"removed {file_path} from json") print(f"filter_folder {len(frames_copy)} files in json, {len(self.traj['frames'])} left") + def rename_filename(self, old_folder=None, new_folder=None, prefix="", suffix="", base_folder=None): + for frame in self.traj["frames"]: + file_path = Path(frame["file_path"]) + if old_folder is not None and new_folder is not None: + assert str(file_path).startswith(old_folder), f"{file_path} does not start with {old_folder}" + new_file_path = Path(str(file_path).replace(old_folder, new_folder)) + new_file_path = str(new_file_path.parent / (prefix + new_file_path.stem + suffix + new_file_path.suffix)) + frame["file_path"] = new_file_path + if base_folder is not None: + abs_old_file = Path(base_folder) / file_path + assert abs_old_file.exists(), f"{abs_old_file} not exist" + abs_new_file = Path(base_folder) / new_file_path + 
abs_new_file.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(abs_old_file, abs_new_file) + def keep_timestamp_only(self, start_time, end_time): frames_copy = self.traj["frames"].copy() for frame in frames_copy: diff --git a/scripts/reconstruction_benchmark/nerf_json.py b/scripts/reconstruction_benchmark/nerf_json.py new file mode 100644 index 0000000..fa702c8 --- /dev/null +++ b/scripts/reconstruction_benchmark/nerf_json.py @@ -0,0 +1,35 @@ +from copy import deepcopy +from pathlib import Path + +from oxford_spires_utils.trajectory.nerf_json_handler import NeRFJsonHandler + + +def merge_two_json(json1, json2): + assert json1.traj.keys() == json2.traj.keys() + new_json = deepcopy(json1) + new_json.traj["frames"] += json2.traj["frames"] + return new_json + + +image_dir = "/home/docker_dev/oxford_spires_dataset/data/roq-full/images" +json_train_file = "/home/docker_dev/oxford_spires_dataset/data/roq-full/outputs/colmap/seq_1_fountain.json" +json_eval_file = "/home/docker_dev/oxford_spires_dataset/data/roq-full/outputs/colmap/seq_1_fountain_back.json" +new_image_dir = Path(image_dir).parent.resolve() / "images_train_eval" +merged_json_file = Path(json_train_file).parent / "merged.json" + + +json_train = NeRFJsonHandler(json_train_file) +json_eval = NeRFJsonHandler(json_eval_file) +json_train.rename_filename( + old_folder="images", new_folder=new_image_dir.stem, prefix="train_", base_folder=str(Path(image_dir).parent) +) +json_eval.rename_filename( + old_folder="images", new_folder=new_image_dir.stem, prefix="eval_", base_folder=str(Path(image_dir).parent) +) + + +# merge +new_json = merge_two_json(json_train, json_eval) +new_json.save_json(str(merged_json_file)) + +# create json with the new train/eval prefix From 5e60c50eaa729de76738f72449440b286766841c Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 9 Oct 2024 15:47:54 +0100 Subject: [PATCH 34/83] refactor: add nerfstudio iter in main --- scripts/reconstruction_benchmark/main.py | 2 +- scripts/reconstruction_benchmark/nerf.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 034bcdf..5aa2d82 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -207,7 +207,7 @@ def run_nerfstudio(self, method="nerfacto", ns_data_dir=None, json_filename="tra ns_data_dir = self.ns_data_dir if ns_data_dir is None else Path(ns_data_dir) ns_model_dir = ns_data_dir / "trained_models" assert ns_data_dir.exists(), f"nerfstudio directory not found at {ns_data_dir}" - ns_config = generate_nerfstudio_config(method, ns_data_dir / json_filename, ns_model_dir) + ns_config = generate_nerfstudio_config(method, ns_data_dir / json_filename, ns_model_dir, iterations=5000) final_cloud_file = run_nerfstudio(ns_config) final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 8d0d0a6..598b91c 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -16,7 +16,7 @@ def generate_nerfstudio_config( - method, data_dir, output_dir, iterations=30000, eval_step=500, vis="wandb", cam_opt_mode="off" + method, data_dir, output_dir, iterations=5000, eval_step=500, vis="wandb", cam_opt_mode="off" ): ns_config = { "method": method, From 8e7af2d71eaa65c6e23f7bffc10ff14e1aab57e8 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 10 Oct 2024 00:07:37 +0100 Subject: 
[PATCH 35/83] feat: support eval-mode with nerfstudio-data config refactored --- scripts/reconstruction_benchmark/main.py | 10 ++++++--- scripts/reconstruction_benchmark/nerf.py | 27 +++++++++++++++--------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 5aa2d82..c7e8f33 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -203,12 +203,16 @@ def compute_sim3(self): if not ns_metric_json_file.exists(): ns_metric_json_file.symlink_to(rescaled_colmap_traj_file) # TODO remove old ones? - def run_nerfstudio(self, method="nerfacto", ns_data_dir=None, json_filename="transforms_metric.json"): + def run_nerfstudio( + self, method="nerfacto", ns_data_dir=None, json_filename="transforms_metric.json", eval_mode="fraction" + ): ns_data_dir = self.ns_data_dir if ns_data_dir is None else Path(ns_data_dir) ns_model_dir = ns_data_dir / "trained_models" assert ns_data_dir.exists(), f"nerfstudio directory not found at {ns_data_dir}" - ns_config = generate_nerfstudio_config(method, ns_data_dir / json_filename, ns_model_dir, iterations=5000) - final_cloud_file = run_nerfstudio(ns_config) + ns_config, ns_data_config = generate_nerfstudio_config( + method, ns_data_dir / json_filename, ns_model_dir, eval_mode=eval_mode + ) + final_cloud_file = run_nerfstudio(ns_config, ns_data_config) final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) def evaluate_reconstruction(self, input_cloud_path): diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 598b91c..a94a3f1 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -16,18 +16,24 @@ def generate_nerfstudio_config( - method, data_dir, output_dir, iterations=5000, eval_step=500, vis="wandb", cam_opt_mode="off" + method, data_dir, output_dir, iterations=5000, eval_step=500, vis="wandb", cam_opt_mode="off", eval_mode="fraction" ): + exp_name = Path(data_dir).name if Path(data_dir).is_dir() else Path(data_dir).parent.name ns_config = { "method": method, - "data": str(data_dir), + "experiment-name": str(exp_name), "output-dir": str(output_dir), "vis": vis, "max-num-iterations": iterations, "pipeline.model.camera-optimizer.mode": cam_opt_mode, "steps-per-eval-image": eval_step, } - return ns_config + ns_data_config = { + "dataparser": "nerfstudio-data", + "data": str(data_dir), + "eval-mode": eval_mode, + } + return ns_config, ns_data_config def create_nerfstudio_dir(colmap_dir, ns_dir, image_dir): @@ -51,10 +57,11 @@ def create_nerfstudio_dir(colmap_dir, ns_dir, image_dir): item_symlink.symlink_to(relative_item) -def update_argv(nerfstudio_config): - assert sys.argv[0].endswith(".py") and len(sys.argv) == 1, "No args should be provided for the script" +def update_argv(nerfstudio_config, follow_up=False): + if not follow_up: + assert sys.argv[0].endswith(".py") and len(sys.argv) == 1, "No args should be provided for the script" for k, v in nerfstudio_config.items(): - if k == "method": + if k in ("method", "dataparser"): sys.argv.append(f"{v}") else: sys.argv.append(f"--{k}") @@ -62,14 +69,14 @@ def update_argv(nerfstudio_config): print_with_colour(" ".join(sys.argv)) -def run_nerfstudio(ns_config): - logger.info(f"Running '{ns_config['method']}' on {ns_config['data']}") +def run_nerfstudio(ns_config, ns_data_config): + logger.info(f"Running '{ns_config['method']}' on {ns_data_config['data']}") 
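# ns_config and ns_data_config are appended to sys.argv in order, so the resulting call is roughly equivalent to:
#   ns-train <method> --output-dir <dir> --max-num-iterations <n> ... nerfstudio-data --data <transforms json> --eval-mode <fraction|filename>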
logging.disable(logging.DEBUG) update_argv(ns_config) + update_argv(ns_data_config, follow_up=True) train_entrypoint() sys.argv = [sys.argv[0]] - ns_data = Path(ns_config["data"]) - folder_name = ns_data.name if ns_data.is_dir() else ns_data.parent.name + folder_name = ns_config["experiment-name"] # rename nerfacto-big or nerfacto-huge to nerfacto, splatfacto-big to splatfacto method_dir_name = ns_config["method"].replace("-big", "").replace("-huge", "") output_log_dir = Path(ns_config["output-dir"]) / folder_name / method_dir_name From 1423cba44d7833c4312d2bb8d73d2ee08a3253c1 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 10 Oct 2024 12:04:20 +0100 Subject: [PATCH 36/83] feat: add script to get train-eval from time window --- scripts/reconstruction_benchmark/nerf_json.py | 69 +++++++++++++------ 1 file changed, 48 insertions(+), 21 deletions(-) diff --git a/scripts/reconstruction_benchmark/nerf_json.py b/scripts/reconstruction_benchmark/nerf_json.py index fa702c8..289abd6 100644 --- a/scripts/reconstruction_benchmark/nerf_json.py +++ b/scripts/reconstruction_benchmark/nerf_json.py @@ -1,35 +1,62 @@ from copy import deepcopy from pathlib import Path +from oxford_spires_utils.trajectory.file_interfaces.nerf import NeRFTrajReader from oxford_spires_utils.trajectory.nerf_json_handler import NeRFJsonHandler +from oxford_spires_utils.trajectory.utils import pose_to_ply -def merge_two_json(json1, json2): - assert json1.traj.keys() == json2.traj.keys() - new_json = deepcopy(json1) - new_json.traj["frames"] += json2.traj["frames"] - return new_json +def split_json(json_file, start_time, end_time, new_name): + nerf_json_handler = NeRFJsonHandler(json_file) + nerf_json_handler.keep_timestamp_only(start_time, end_time) + save_path = Path(json_file).parent / f"{new_name}.json" + nerf_json_handler.save_json(save_path) + nerf_traj = NeRFTrajReader(save_path) + nerf_pose = nerf_traj.read_file() + pose_ply_file = Path(json_file).parent / f"{new_name}.ply" + pose_to_ply(nerf_pose, pose_ply_file) -image_dir = "/home/docker_dev/oxford_spires_dataset/data/roq-full/images" -json_train_file = "/home/docker_dev/oxford_spires_dataset/data/roq-full/outputs/colmap/seq_1_fountain.json" -json_eval_file = "/home/docker_dev/oxford_spires_dataset/data/roq-full/outputs/colmap/seq_1_fountain_back.json" -new_image_dir = Path(image_dir).parent.resolve() / "images_train_eval" -merged_json_file = Path(json_train_file).parent / "merged.json" +def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, merged_json_file): + json_train = NeRFJsonHandler(json_train_file) + json_eval = NeRFJsonHandler(json_eval_file) + json_train.rename_filename( + old_folder="images", new_folder=new_image_dir.stem, prefix="train_", base_folder=str(Path(image_dir).parent) + ) + json_eval.rename_filename( + old_folder="images", new_folder=new_image_dir.stem, prefix="eval_", base_folder=str(Path(image_dir).parent) + ) + # merge + assert json_train.traj.keys() == json_eval.traj.keys() + new_json = deepcopy(json_train) + new_json.traj["frames"] += json_eval.traj["frames"] + new_json.save_json(str(merged_json_file)) -json_train = NeRFJsonHandler(json_train_file) -json_eval = NeRFJsonHandler(json_eval_file) -json_train.rename_filename( - old_folder="images", new_folder=new_image_dir.stem, prefix="train_", base_folder=str(Path(image_dir).parent) -) -json_eval.rename_filename( - old_folder="images", new_folder=new_image_dir.stem, prefix="eval_", base_folder=str(Path(image_dir).parent) -) +dataset_folder = 
"/home/docker_dev/oxford_spires_dataset/data/roq-full" -# merge -new_json = merge_two_json(json_train, json_eval) -new_json.save_json(str(merged_json_file)) +train_name = "seq_1_fountain" +start_time = 1710338123.042936934 +end_time = 1710338186.342030327 +eval_name = "seq_1_fountain_back" +start_time = 1710338353.039451086 +end_time = 1710338386.638942551 + +dataset_folder = Path(dataset_folder).resolve() +colmap_folder = dataset_folder / "outputs/colmap" +json_file = colmap_folder / "transforms.json" + +split_json(json_file, start_time, end_time, train_name) +split_json(json_file, start_time, end_time, eval_name) + + +image_dir = dataset_folder / "images" +json_train_file = colmap_folder / (train_name + ".json") +json_eval_file = colmap_folder / (eval_name + "json") +new_image_dir = Path(image_dir).parent / "images_train_eval" +merged_json_file = colmap_folder / "transforms_train_eval.json" + +merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, merged_json_file) # create json with the new train/eval prefix From 06193097b843067b963e8fecd275edc0ffc9432c Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 10 Oct 2024 13:49:38 +0100 Subject: [PATCH 37/83] feat: support undistorted image --- scripts/reconstruction_benchmark/nerf_json.py | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/scripts/reconstruction_benchmark/nerf_json.py b/scripts/reconstruction_benchmark/nerf_json.py index 289abd6..fa67585 100644 --- a/scripts/reconstruction_benchmark/nerf_json.py +++ b/scripts/reconstruction_benchmark/nerf_json.py @@ -1,20 +1,22 @@ from copy import deepcopy from pathlib import Path +from nerf import create_nerfstudio_dir + from oxford_spires_utils.trajectory.file_interfaces.nerf import NeRFTrajReader from oxford_spires_utils.trajectory.nerf_json_handler import NeRFJsonHandler from oxford_spires_utils.trajectory.utils import pose_to_ply -def split_json(json_file, start_time, end_time, new_name): +def split_json(json_file, start_time, end_time, save_path): nerf_json_handler = NeRFJsonHandler(json_file) nerf_json_handler.keep_timestamp_only(start_time, end_time) - save_path = Path(json_file).parent / f"{new_name}.json" + # save_path = Path(json_file).parent / f"{new_name}.json" nerf_json_handler.save_json(save_path) nerf_traj = NeRFTrajReader(save_path) nerf_pose = nerf_traj.read_file() - pose_ply_file = Path(json_file).parent / f"{new_name}.ply" + pose_ply_file = save_path.with_suffix(".ply") pose_to_ply(nerf_pose, pose_ply_file) @@ -44,19 +46,27 @@ def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, start_time = 1710338353.039451086 end_time = 1710338386.638942551 +use_undistorted_image = True + dataset_folder = Path(dataset_folder).resolve() colmap_folder = dataset_folder / "outputs/colmap" +if use_undistorted_image: + colmap_folder = colmap_folder / "dense" json_file = colmap_folder / "transforms.json" -split_json(json_file, start_time, end_time, train_name) -split_json(json_file, start_time, end_time, eval_name) +train_save_path = Path(json_file).parent / f"{train_name}.json" +eval_save_path = Path(json_file).parent / f"{eval_name}.json" +split_json(json_file, start_time, end_time, train_save_path) +split_json(json_file, start_time, end_time, eval_save_path) -image_dir = dataset_folder / "images" +image_dir = dataset_folder / "images" if not use_undistorted_image else colmap_folder / "images" json_train_file = colmap_folder / (train_name + ".json") -json_eval_file = colmap_folder / (eval_name + "json") 
-new_image_dir = Path(image_dir).parent / "images_train_eval" +json_eval_file = colmap_folder / (eval_name + ".json") +new_image_dir = image_dir.parent / "images_train_eval" merged_json_file = colmap_folder / "transforms_train_eval.json" - merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, merged_json_file) # create json with the new train/eval prefix + +ns_dir = dataset_folder / "outputs" / "nerfstudio" / (dataset_folder.stem + "_undistorted") +create_nerfstudio_dir(colmap_folder, ns_dir, new_image_dir) From c6908c397181c9787188bee256adc9da5ff1d961 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 10 Oct 2024 15:03:28 +0100 Subject: [PATCH 38/83] feat: save the train json for the simple eval --- scripts/reconstruction_benchmark/nerf_json.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/reconstruction_benchmark/nerf_json.py b/scripts/reconstruction_benchmark/nerf_json.py index fa67585..381d21e 100644 --- a/scripts/reconstruction_benchmark/nerf_json.py +++ b/scripts/reconstruction_benchmark/nerf_json.py @@ -26,6 +26,7 @@ def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, json_train.rename_filename( old_folder="images", new_folder=new_image_dir.stem, prefix="train_", base_folder=str(Path(image_dir).parent) ) + json_train.save_json(str(merged_json_file.with_name("transforms_train.json"))) json_eval.rename_filename( old_folder="images", new_folder=new_image_dir.stem, prefix="eval_", base_folder=str(Path(image_dir).parent) ) From 7ef2451172b20605dd5c4d4e355f461050038a43 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 10 Oct 2024 15:03:54 +0100 Subject: [PATCH 39/83] fix: correct the timestamp used --- scripts/reconstruction_benchmark/nerf_json.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/reconstruction_benchmark/nerf_json.py b/scripts/reconstruction_benchmark/nerf_json.py index 381d21e..cfac460 100644 --- a/scripts/reconstruction_benchmark/nerf_json.py +++ b/scripts/reconstruction_benchmark/nerf_json.py @@ -40,12 +40,12 @@ def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, dataset_folder = "/home/docker_dev/oxford_spires_dataset/data/roq-full" train_name = "seq_1_fountain" -start_time = 1710338123.042936934 -end_time = 1710338186.342030327 +train_start_time = 1710338123.042936934 +train_end_time = 1710338186.342030327 eval_name = "seq_1_fountain_back" -start_time = 1710338353.039451086 -end_time = 1710338386.638942551 +eval_start_time = 1710338353.039451086 +eval_end_time = 1710338386.638942551 use_undistorted_image = True @@ -57,8 +57,8 @@ def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, train_save_path = Path(json_file).parent / f"{train_name}.json" eval_save_path = Path(json_file).parent / f"{eval_name}.json" -split_json(json_file, start_time, end_time, train_save_path) -split_json(json_file, start_time, end_time, eval_save_path) +split_json(json_file, train_start_time, train_end_time, train_save_path) +split_json(json_file, eval_start_time, eval_end_time, eval_save_path) image_dir = dataset_folder / "images" if not use_undistorted_image else colmap_folder / "images" From d1e76df9e84b28a0fbe22a04de77eb3c0cab9b42 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 10 Oct 2024 15:11:28 +0100 Subject: [PATCH 40/83] refactor: add sift max num features --- scripts/reconstruction_benchmark/sfm.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/reconstruction_benchmark/sfm.py 
b/scripts/reconstruction_benchmark/sfm.py index d335e56..4fd5194 100644 --- a/scripts/reconstruction_benchmark/sfm.py +++ b/scripts/reconstruction_benchmark/sfm.py @@ -55,6 +55,7 @@ def run_colmap( camera_model="OPENCV_FISHEYE", matcher="vocab_tree_matcher", loop_detection_period=10, + sift_max_num_features=8192, max_image_size=1000, ): logger.debug(f"Running colmap; img_path {image_path}; output: {output_path}, {camera_model}") assert camera_model in camera_model_list, f"{camera_model} not supported. Supported models: {camera_model_list}" @@ -70,6 +71,7 @@ def run_colmap( f"--database_path {database_path}", "--ImageReader.single_camera_per_folder 1", f"--ImageReader.camera_model {camera_model}", + f"--SiftExtraction.max_num_features {sift_max_num_features}", ] colmap_feature_extractor_cmd = " ".join(colmap_feature_extractor_cmd) logger.info(f"Running {colmap_feature_extractor_cmd}") From 88c081bdffc95ead525048407239355ea3167f8a Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 10 Oct 2024 15:23:05 +0100 Subject: [PATCH 41/83] feat: add commented script for nvs benchmark with eval defined by filename --- scripts/reconstruction_benchmark/main.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index c7e8f33..fbbf566 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -246,6 +246,9 @@ def evaluate_reconstruction(self, input_cloud_path): recon_benchmark.compute_sim3() recon_benchmark.evaluate_reconstruction(recon_benchmark.scaled_mvs_cloud_gt_frame_file) undistorted_ns_dir = recon_benchmark.ns_data_dir.with_name(recon_benchmark.ns_data_dir.name + "_undistorted") - # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms.json", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_metric.json") recon_benchmark.run_nerfstudio("splatfacto") From d83623a886f44e7db3eb338b614c36e52548ac56 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 10 Oct 2024 16:16:32 +0100 Subject: [PATCH 42/83] feat: add commented script for splatfacto and big version --- scripts/reconstruction_benchmark/main.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index fbbf566..1d82de2 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -250,5 +250,10 @@ def evaluate_reconstruction(self, input_cloud_path): # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("splatfacto",
json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) + recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_metric.json") recon_benchmark.run_nerfstudio("splatfacto") From 349d65d02147267384f50c67a3d01c4d6ce6ba5b Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 10 Oct 2024 16:16:49 +0100 Subject: [PATCH 43/83] refactor: print nerf output folder name first --- scripts/reconstruction_benchmark/nerf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index a94a3f1..31308f9 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -84,7 +84,7 @@ def run_nerfstudio(ns_config, ns_data_config): latest_output_config = lastest_output_folder / "config.yml" # evaluate renders - logger.info(f"Evaluating from {lastest_output_folder}") + logger.info(f"Evaluating {latest_output_folder.name} from {lastest_output_folder}") render_dir = lastest_output_folder / "renders" run_nerfstudio_eval(latest_output_config, render_dir) logging.disable(logging.NOTSET) From 7b6c53a3e90a7dff707fbf2424b7f08ebbeb32c0 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Fri, 11 Oct 2024 16:00:42 +0100 Subject: [PATCH 44/83] feat: sort json frames --- oxford_spires_utils/trajectory/nerf_json_handler.py | 3 +++ scripts/reconstruction_benchmark/nerf_json.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/oxford_spires_utils/trajectory/nerf_json_handler.py b/oxford_spires_utils/trajectory/nerf_json_handler.py index ca18246..dac6e5a 100644 --- a/oxford_spires_utils/trajectory/nerf_json_handler.py +++ b/oxford_spires_utils/trajectory/nerf_json_handler.py @@ -20,6 +20,9 @@ def save_json(self, json_path): def get_n_frames(self): return len(self.traj["frames"]) + def sort_frames(self): + self.traj["frames"].sort(key=lambda x: x["file_path"]) + def sync_with_folder(self, folder_path, valid_ext=".jpg"): # get all files in subfolder ref_files = list(Path(folder_path).glob("**/*" + valid_ext)) diff --git a/scripts/reconstruction_benchmark/nerf_json.py b/scripts/reconstruction_benchmark/nerf_json.py index cfac460..2a28108 100644 --- a/scripts/reconstruction_benchmark/nerf_json.py +++ b/scripts/reconstruction_benchmark/nerf_json.py @@ -10,8 +10,8 @@ def split_json(json_file, start_time, end_time, save_path): nerf_json_handler = NeRFJsonHandler(json_file) + nerf_json_handler.sort_frames() nerf_json_handler.keep_timestamp_only(start_time, end_time) - # save_path = Path(json_file).parent / f"{new_name}.json" nerf_json_handler.save_json(save_path) nerf_traj = NeRFTrajReader(save_path) From d0b8ff58327bc9afce498a2c6cfa8aaf92751456 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Fri, 11 Oct 2024 16:23:06 +0100 Subject: [PATCH 45/83] feat: sort timestamp in export json --- scripts/reconstruction_benchmark/sfm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/reconstruction_benchmark/sfm.py b/scripts/reconstruction_benchmark/sfm.py index 4fd5194..beb0704 100644 --- 
a/scripts/reconstruction_benchmark/sfm.py +++ b/scripts/reconstruction_benchmark/sfm.py @@ -188,6 +188,7 @@ def export_json(input_bin_dir, json_file_name="transforms.json", output_dir=None frame["mask_path"] = camera_mask_path.relative_to(camera_mask_path.parent.parent).as_posix() frames.append(frame) + frames = sorted(frames, key=lambda x: x["file_path"]) out = {} out["camera_model"] = camera.model From 4256c043b330942aa1e7db261e15a4bf990dc9b5 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Fri, 11 Oct 2024 17:07:13 +0100 Subject: [PATCH 46/83] refactor: rename split json to select json with time range --- scripts/reconstruction_benchmark/nerf_json.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/reconstruction_benchmark/nerf_json.py b/scripts/reconstruction_benchmark/nerf_json.py index 2a28108..1d02513 100644 --- a/scripts/reconstruction_benchmark/nerf_json.py +++ b/scripts/reconstruction_benchmark/nerf_json.py @@ -8,7 +8,7 @@ from oxford_spires_utils.trajectory.utils import pose_to_ply -def split_json(json_file, start_time, end_time, save_path): +def select_json_with_time_range(json_file, start_time, end_time, save_path): nerf_json_handler = NeRFJsonHandler(json_file) nerf_json_handler.sort_frames() nerf_json_handler.keep_timestamp_only(start_time, end_time) @@ -57,8 +57,8 @@ def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, train_save_path = Path(json_file).parent / f"{train_name}.json" eval_save_path = Path(json_file).parent / f"{eval_name}.json" -split_json(json_file, train_start_time, train_end_time, train_save_path) -split_json(json_file, eval_start_time, eval_end_time, eval_save_path) +select_json_with_time_range(json_file, train_start_time, train_end_time, train_save_path) +select_json_with_time_range(json_file, eval_start_time, eval_end_time, eval_save_path) image_dir = dataset_folder / "images" if not use_undistorted_image else colmap_folder / "images" From e6a972607cfeef7e951712d70f708f658d1e34db Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Fri, 11 Oct 2024 17:25:24 +0100 Subject: [PATCH 47/83] feat: add yaml for recon benchmark configs --- config/recon_benchmark.yaml | 9 +++++ scripts/reconstruction_benchmark/main.py | 49 ++++++++++++++---------- 2 files changed, 37 insertions(+), 21 deletions(-) create mode 100644 config/recon_benchmark.yaml diff --git a/config/recon_benchmark.yaml b/config/recon_benchmark.yaml new file mode 100644 index 0000000..722fa7e --- /dev/null +++ b/config/recon_benchmark.yaml @@ -0,0 +1,9 @@ +reconstruction_benchmark: + project_folder: "/home/docker_dev/oxford_spires_dataset/data/2024-03-13-observatory-quarter-01" + gt_folder: "/home/docker_dev/oxford_spires_dataset/data/ground_truth_cloud/observatory-quarter" + + run_gt_cloud_processing: True + run_lidar_cloud_processing: True + run_colmap: True + run_mvs: True + run_nerfstudio: True \ No newline at end of file diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 1d82de2..85c8ec5 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -232,28 +232,35 @@ def evaluate_reconstruction(self, input_cloud_path): if __name__ == "__main__": setup_logging() logger.info("Starting Reconstruction Benchmark") + with open(Path(__file__).parent.parent.parent / "config" / "recon_benchmark.yaml", "r") as f: + recon_config = yaml.safe_load(f)["reconstruction_benchmark"] with open(Path(__file__).parent.parent.parent / "config" / "sensor.yaml", "r") as f: 
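# sensor.yaml follows the same layout as recon_benchmark.yaml: a single top-level key whose fields are unpacked into the Sensor constructor below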
sensor_config = yaml.safe_load(f)["sensor"] sensor = Sensor(**sensor_config) - project_folder = "/home/oxford_spires_dataset/data/2024-03-13-observatory-quarter-01" - recon_benchmark = ReconstructionBenchmark(project_folder, gt_folder, sensor) - recon_benchmark.load_lidar_gt_transform() - recon_benchmark.process_gt_cloud() - recon_benchmark.process_lidar_clouds() - recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_cloud_merged_path) - recon_benchmark.run_colmap() - recon_benchmark.run_openmvs() - recon_benchmark.compute_sim3() - recon_benchmark.evaluate_reconstruction(recon_benchmark.scaled_mvs_cloud_gt_frame_file) - undistorted_ns_dir = recon_benchmark.ns_data_dir.with_name(recon_benchmark.ns_data_dir.name + "_undistorted") - # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) - recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_metric.json") - recon_benchmark.run_nerfstudio("splatfacto") + recon_benchmark = ReconstructionBenchmark(recon_config["project_folder"], recon_config["gt_folder"], sensor) + recon_benchmark.load_lidar_gt_transform() + if recon_config["run_gt_cloud_processing"]: + recon_benchmark.process_gt_cloud() + if recon_config["run_lidar_cloud_processing"]: + recon_benchmark.process_lidar_clouds() + recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_cloud_merged_path) + recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_occ_benchmark_file) + if recon_config["run_colmap"]: + recon_benchmark.run_colmap("sequential_matcher") + if recon_config["run_mvs"]: + recon_benchmark.run_openmvs() + recon_benchmark.compute_sim3() + recon_benchmark.evaluate_reconstruction(recon_benchmark.scaled_mvs_cloud_gt_frame_file) + if recon_config["run_nerfstudio"]: + # undistorted_ns_dir = recon_benchmark.ns_data_dir.with_name(recon_benchmark.ns_data_dir.name + "_undistorted") + # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) + # 
recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) + recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_metric.json") + recon_benchmark.run_nerfstudio("splatfacto") From 1bceb2a218ffd53e2e47a4737e11fb389dd9f9f6 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Fri, 11 Oct 2024 18:37:36 +0100 Subject: [PATCH 48/83] feat: add config file path to argparser --- scripts/reconstruction_benchmark/main.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 85c8ec5..56ffe9f 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -1,3 +1,4 @@ +import argparse import logging import shutil from copy import deepcopy @@ -229,10 +230,22 @@ def evaluate_reconstruction(self, input_cloud_path): save_error_cloud(input_cloud_np, gt_cloud_np, str(error_cloud_file)) +def get_args(): + parser = argparse.ArgumentParser(description="Reconstruction Benchmark") + default_recon_config_file = Path(__file__).parent.parent.parent / "config" / "recon_benchmark.yaml" + parser.add_argument( + "--config-file", + type=str, + default=str(default_recon_config_file), + ) + return parser.parse_args() + + if __name__ == "__main__": setup_logging() + recon_config_file = get_args().config_file logger.info("Starting Reconstruction Benchmark") - with open(Path(__file__).parent.parent.parent / "config" / "recon_benchmark.yaml", "r") as f: + with open(recon_config_file, "r") as f: recon_config = yaml.safe_load(f)["reconstruction_benchmark"] with open(Path(__file__).parent.parent.parent / "config" / "sensor.yaml", "r") as f: sensor_config = yaml.safe_load(f)["sensor"] From 62f45d34369b1dcfab52dfdbebc7076815ae4c22 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 00:31:49 +0100 Subject: [PATCH 49/83] feat: add separate flag for lidar cloud eval --- config/recon_benchmark.yaml | 2 ++ scripts/reconstruction_benchmark/main.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/config/recon_benchmark.yaml b/config/recon_benchmark.yaml index 722fa7e..4ccfa7c 100644 --- a/config/recon_benchmark.yaml +++ b/config/recon_benchmark.yaml @@ -4,6 +4,8 @@ reconstruction_benchmark: run_gt_cloud_processing: True run_lidar_cloud_processing: True + run_lidar_cloud_evaluation: True run_colmap: True run_mvs: True + run_mvs_evaluation: True run_nerfstudio: True \ No newline at end of file diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 56ffe9f..1c0049d 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -257,6 +257,7 @@ def get_args(): recon_benchmark.process_gt_cloud() if recon_config["run_lidar_cloud_processing"]: recon_benchmark.process_lidar_clouds() + if recon_config["run_lidar_cloud_evaluation"]: recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_cloud_merged_path) 
recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_occ_benchmark_file) if recon_config["run_colmap"]: @@ -264,6 +265,7 @@ def get_args(): if recon_config["run_mvs"]: recon_benchmark.run_openmvs() recon_benchmark.compute_sim3() + if recon_config["run_mvs_evaluation"]: recon_benchmark.evaluate_reconstruction(recon_benchmark.scaled_mvs_cloud_gt_frame_file) if recon_config["run_nerfstudio"]: # undistorted_ns_dir = recon_benchmark.ns_data_dir.with_name(recon_benchmark.ns_data_dir.name + "_undistorted") From eb4b855499574c0518c7a1ebd6e45da195306238 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 00:33:09 +0100 Subject: [PATCH 50/83] refactor: use a bigger undistorted image size for mvs --- scripts/reconstruction_benchmark/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 1c0049d..75da541 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -70,7 +70,7 @@ def __init__(self, project_folder, gt_folder, sensor): self.openmvs_bin = "/usr/local/bin/OpenMVS" self.mvs_output_folder = self.output_folder / "mvs" self.mvs_output_folder.mkdir(exist_ok=True, parents=True) - self.colmap_undistort_max_image_size = 600 + self.colmap_undistort_max_image_size = 1000 self.ns_data_dir = self.output_folder / "nerfstudio" / self.project_folder.name self.metric_json_filename = "transforms_metric.json" From 5556d90fc7c436ec4774af6c94085f4fd9616f40 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 01:22:24 +0100 Subject: [PATCH 51/83] refactor: move scaled mvs fname to init --- scripts/reconstruction_benchmark/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 75da541..3511a18 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -70,6 +70,7 @@ def __init__(self, project_folder, gt_folder, sensor): self.openmvs_bin = "/usr/local/bin/OpenMVS" self.mvs_output_folder = self.output_folder / "mvs" self.mvs_output_folder.mkdir(exist_ok=True, parents=True) + self.scaled_mvs_cloud_gt_frame_file = self.recon_benchmark_dir / "OpenMVS_dense_cloud_gt_frame.pcd" self.colmap_undistort_max_image_size = 1000 self.ns_data_dir = self.output_folder / "nerfstudio" / self.project_folder.name @@ -196,7 +197,6 @@ def compute_sim3(self): mvs_cloud_file = self.mvs_output_folder / "scene_dense_nerf_world.ply" self.scaled_mvs_cloud_file = self.mvs_output_folder / "OpenMVS_dense_cloud_metric.pcd" rescale_openmvs_cloud(mvs_cloud_file, T_lidar_colmap, self.scaled_mvs_cloud_file) - self.scaled_mvs_cloud_gt_frame_file = self.recon_benchmark_dir / "OpenMVS_dense_cloud_gt_frame.pcd" transform_cloud_to_gt_frame( self.scaled_mvs_cloud_file, self.transform_matrix, self.scaled_mvs_cloud_gt_frame_file ) From 4d9a3dcc999e0281ec082e1f0fca4be06b5af122 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 02:21:56 +0100 Subject: [PATCH 52/83] feat: use all cpu cores for kdtree query --- oxford_spires_utils/eval.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/oxford_spires_utils/eval.py b/oxford_spires_utils/eval.py index d3c8806..622b03b 100644 --- a/oxford_spires_utils/eval.py +++ b/oxford_spires_utils/eval.py @@ -9,7 +9,7 @@ def compute_p2p_distance(query_cloud: np.ndarray, reference_cloud: np.ndarray): ref_kd_tree = KDTree(reference_cloud) - distances, _ = 
ref_kd_tree.query(query_cloud) + distances, _ = ref_kd_tree.query(query_cloud, workers=-1) return distances From d2e90e3b151640f6c5b10906e54693bcf76acb8f Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 13:26:34 +0100 Subject: [PATCH 53/83] refactor: remove ns_metric_json before symlink --- scripts/reconstruction_benchmark/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 3511a18..2d12f42 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -201,8 +201,8 @@ def compute_sim3(self): self.scaled_mvs_cloud_file, self.transform_matrix, self.scaled_mvs_cloud_gt_frame_file ) ns_metric_json_file = self.ns_data_dir / self.metric_json_filename - if not ns_metric_json_file.exists(): - ns_metric_json_file.symlink_to(rescaled_colmap_traj_file) # TODO remove old ones? + ns_metric_json_file.unlink(missing_ok=True) + ns_metric_json_file.symlink_to(rescaled_colmap_traj_file) def run_nerfstudio( self, method="nerfacto", ns_data_dir=None, json_filename="transforms_metric.json", eval_mode="fraction" From 33d78c8b1fa17813aef5539cd0ca04edfb1c8d3f Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 13:36:34 +0100 Subject: [PATCH 54/83] feat: add f1_score to eval metrics --- oxford_spires_utils/eval.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/oxford_spires_utils/eval.py b/oxford_spires_utils/eval.py index 622b03b..e6e73e3 100644 --- a/oxford_spires_utils/eval.py +++ b/oxford_spires_utils/eval.py @@ -30,6 +30,7 @@ def get_recon_metrics( distances = compute_p2p_distance(gt_cloud, input_cloud) completeness = np.mean(distances) recall = np.sum(distances < recall_threshold) / len(distances) + f1_score = 2 * (precision * recall) / (precision + recall) print("Done!") return { @@ -37,6 +38,7 @@ def get_recon_metrics( "precision": precision, "completeness": completeness, "recall": recall, + "f1_score": f1_score, } From 49145c9ecb4505b2c8dc6a095625e0b4a6a24bea Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 13:40:26 +0100 Subject: [PATCH 55/83] refactor: define PR threshold --- scripts/reconstruction_benchmark/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 2d12f42..c92431d 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -225,7 +225,7 @@ def evaluate_reconstruction(self, input_cloud_path): removeUnknownPoints(str(input_cloud_path), str(self.gt_octree_path), str(filtered_input_cloud_path)) input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(filtered_input_cloud_path)).points) gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).points) - logger.info(get_recon_metrics(input_cloud_np, gt_cloud_np)) + logger.info(get_recon_metrics(input_cloud_np, gt_cloud_np, precision_threshold=0.05, recall_threshold=0.05)) error_cloud_file = filtered_input_cloud_path.with_name(f"{filtered_input_cloud_path.stem}_error.pcd") save_error_cloud(input_cloud_np, gt_cloud_np, str(error_cloud_file)) From 7259f0145ef50daee029a86abaf68a212dd6b5c1 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 13:51:49 +0100 Subject: [PATCH 56/83] refactor: use logger in eval --- oxford_spires_utils/eval.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/oxford_spires_utils/eval.py 
b/oxford_spires_utils/eval.py index e6e73e3..132af14 100644 --- a/oxford_spires_utils/eval.py +++ b/oxford_spires_utils/eval.py @@ -1,3 +1,4 @@ +import logging from pathlib import Path import matplotlib.pyplot as plt @@ -6,6 +7,8 @@ from matplotlib.colors import LinearSegmentedColormap from scipy.spatial import cKDTree as KDTree +logger = logging.getLogger(__name__) + def compute_p2p_distance(query_cloud: np.ndarray, reference_cloud: np.ndarray): ref_kd_tree = KDTree(reference_cloud) @@ -21,25 +24,25 @@ def get_recon_metrics( ): assert isinstance(input_cloud, np.ndarray) and isinstance(gt_cloud, np.ndarray) assert input_cloud.shape[1] == 3 and gt_cloud.shape[1] == 3 - print("Computing Accuracy and Precision ...") + logger.info(f"Computing Accuracy and Precision ({precision_threshold}) ...") distances = compute_p2p_distance(input_cloud, gt_cloud) accuracy = np.mean(distances) precision = np.sum(distances < precision_threshold) / len(distances) - print("Computing Completeness and Recall ...") + logger.info(f"Computing Completeness and Recall ({recall_threshold}) ...") distances = compute_p2p_distance(gt_cloud, input_cloud) completeness = np.mean(distances) recall = np.sum(distances < recall_threshold) / len(distances) f1_score = 2 * (precision * recall) / (precision + recall) - print("Done!") - return { + results = { "accuracy": accuracy, "precision": precision, "completeness": completeness, "recall": recall, "f1_score": f1_score, } + return results def save_error_cloud(input_cloud: np.ndarray, reference_cloud: np.ndarray, save_path, cmap="bgyr"): From dae1038173b38ab6084bf424f2eaa39c725be487 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 16:51:43 +0100 Subject: [PATCH 57/83] feat: use multiple PR threshold --- oxford_spires_utils/eval.py | 35 ++++++++++++++++++++++++ scripts/reconstruction_benchmark/main.py | 4 +-- 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/oxford_spires_utils/eval.py b/oxford_spires_utils/eval.py index 132af14..42e76d1 100644 --- a/oxford_spires_utils/eval.py +++ b/oxford_spires_utils/eval.py @@ -45,6 +45,41 @@ def get_recon_metrics( return results +def get_recon_metrics_multi_thresholds( + input_cloud: np.ndarray, + gt_cloud: np.ndarray, + thresholds: list = [0.02, 0.05, 0.1], +): + assert isinstance(input_cloud, np.ndarray) and isinstance(gt_cloud, np.ndarray) + assert input_cloud.shape[1] == 3 and gt_cloud.shape[1] == 3 + results = [] + + logger.info("Computing Accuracy and Precision ...") + input_to_gt_dist = compute_p2p_distance(input_cloud, gt_cloud) + accuracy = np.mean(input_to_gt_dist) + + logger.info("Computing Completeness and Recall ...") + gt_to_input_dist = compute_p2p_distance(gt_cloud, input_cloud) + completeness = np.mean(gt_to_input_dist) + + logger.info(f"Accuracy: {accuracy:.4f}, Completeness: {completeness:.4f}") + results.append({"accuracy": accuracy, "completeness": completeness}) + for threshold in thresholds: + precision = np.sum(input_to_gt_dist < threshold) / len(input_to_gt_dist) + recall = np.sum(gt_to_input_dist < threshold) / len(gt_to_input_dist) + f1_score = 2 * (precision * recall) / (precision + recall) + results.append( + { + "threshold": threshold, + "precision": precision, + "recall": recall, + "f1_score": f1_score, + } + ) + logger.info(f"threshold {threshold} m, precision: {precision:.4f}, recall: {recall:.4f}, f1: {f1_score:.4f}") + return results + + def save_error_cloud(input_cloud: np.ndarray, reference_cloud: np.ndarray, save_path, cmap="bgyr"): def get_BGYR_colourmap(): colours = [ diff --git 
a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index c92431d..8a39cee 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -13,7 +13,7 @@ from sfm import export_json, rescale_colmap_json, run_colmap from oxford_spires_utils.bash_command import print_with_colour -from oxford_spires_utils.eval import get_recon_metrics, save_error_cloud +from oxford_spires_utils.eval import get_recon_metrics_multi_thresholds, save_error_cloud from oxford_spires_utils.point_cloud import merge_downsample_vilens_slam_clouds from oxford_spires_utils.se3 import is_se3_matrix from oxford_spires_utils.sensor import Sensor @@ -225,7 +225,7 @@ def evaluate_reconstruction(self, input_cloud_path): removeUnknownPoints(str(input_cloud_path), str(self.gt_octree_path), str(filtered_input_cloud_path)) input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(filtered_input_cloud_path)).points) gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).points) - logger.info(get_recon_metrics(input_cloud_np, gt_cloud_np, precision_threshold=0.05, recall_threshold=0.05)) + _ = get_recon_metrics_multi_thresholds(input_cloud_np, gt_cloud_np, thresholds=[0.02, 0.05, 0.1]) error_cloud_file = filtered_input_cloud_path.with_name(f"{filtered_input_cloud_path.stem}_error.pcd") save_error_cloud(input_cloud_np, gt_cloud_np, str(error_cloud_file)) From 77aaf4cb9084ee1bc074b862cb48d6a3d494eabd Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 16:52:04 +0100 Subject: [PATCH 58/83] feat: save recon metrics to csv --- scripts/reconstruction_benchmark/main.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 8a39cee..303a022 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -1,4 +1,5 @@ import argparse +import csv import logging import shutil from copy import deepcopy @@ -225,7 +226,14 @@ def evaluate_reconstruction(self, input_cloud_path): removeUnknownPoints(str(input_cloud_path), str(self.gt_octree_path), str(filtered_input_cloud_path)) input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(filtered_input_cloud_path)).points) gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).points) - _ = get_recon_metrics_multi_thresholds(input_cloud_np, gt_cloud_np, thresholds=[0.02, 0.05, 0.1]) + recon_metrics = get_recon_metrics_multi_thresholds(input_cloud_np, gt_cloud_np, thresholds=[0.02, 0.05, 0.1]) + error_csv_file = filtered_input_cloud_path.with_name(f"{filtered_input_cloud_path.stem}_metrics.csv") + with open(error_csv_file, mode="w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=recon_metrics[1].keys() | recon_metrics[0].keys()) + writer.writeheader() + for metric in recon_metrics: + writer.writerow(metric) + error_cloud_file = filtered_input_cloud_path.with_name(f"{filtered_input_cloud_path.stem}_error.pcd") save_error_cloud(input_cloud_np, gt_cloud_np, str(error_cloud_file)) From f7a3553004b0b28b23cec9b1f94c4bd559012652 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 18:57:48 +0100 Subject: [PATCH 59/83] refactor: remove old lidar eval function --- scripts/reconstruction_benchmark/main.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 303a022..4c07f7f 100644 --- 
a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -94,15 +94,6 @@ def process_gt_cloud(self): self.gt_cloud_individual_pcd_folder, self.cloud_downsample_voxel_size, self.gt_cloud_merged_path ) - def evaluate_lidar_clouds(self): - evaluate_lidar_cloud( - self.lidar_output_folder, - self.individual_clouds_folder, - self.gt_octree_path, - self.gt_cloud_merged_path, - self.octomap_resolution, - ) - def load_lidar_gt_transform(self, transform_matrix_path=None): if transform_matrix_path is None: transform_matrix_path = self.project_folder / "T_gt_lidar.txt" From 2a6734ef6f41d9ba19bfae7eb7fae2ada3293418 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sat, 12 Oct 2024 19:31:11 +0100 Subject: [PATCH 60/83] refactor: save metrics not in the same row --- scripts/reconstruction_benchmark/main.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 4c07f7f..4c94fdc 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -220,10 +220,13 @@ def evaluate_reconstruction(self, input_cloud_path): recon_metrics = get_recon_metrics_multi_thresholds(input_cloud_np, gt_cloud_np, thresholds=[0.02, 0.05, 0.1]) error_csv_file = filtered_input_cloud_path.with_name(f"{filtered_input_cloud_path.stem}_metrics.csv") with open(error_csv_file, mode="w", newline="") as f: - writer = csv.DictWriter(f, fieldnames=recon_metrics[1].keys() | recon_metrics[0].keys()) + writer = csv.DictWriter(f, fieldnames=recon_metrics[1].keys()) writer.writeheader() - for metric in recon_metrics: + for metric in recon_metrics[1:]: writer.writerow(metric) + writer = csv.DictWriter(f, fieldnames=recon_metrics[0].keys()) + writer.writeheader() + writer.writerow(recon_metrics[0]) error_cloud_file = filtered_input_cloud_path.with_name(f"{filtered_input_cloud_path.stem}_error.pcd") save_error_cloud(input_cloud_np, gt_cloud_np, str(error_cloud_file)) From 3725de8b6ca1efa09f8b0f5a9f1e65a95cf65942 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sun, 13 Oct 2024 13:44:03 +0100 Subject: [PATCH 61/83] refactor: add recon threshold in main and refactor --- scripts/reconstruction_benchmark/main.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 4c94fdc..32f32b9 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -208,17 +208,19 @@ def run_nerfstudio( final_cloud_file = run_nerfstudio(ns_config, ns_data_config) final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) - def evaluate_reconstruction(self, input_cloud_path): + def evaluate_reconstruction(self, input_cloud_path, results_dir=None): assert input_cloud_path.exists(), f"Input cloud not found at {input_cloud_path}" assert Path(input_cloud_path).suffix == ".pcd", "Input cloud must be a pcd file" assert self.gt_octree_path.exists(), f"Ground truth octree not found at {self.gt_octree_path}" - filtered_input_cloud_path = Path(input_cloud_path).with_name(f"{Path(input_cloud_path).stem}_filtered.pcd") + recon_thresholds = [0.03, 0.05, 0.1, 0.2] + results_dir = self.recon_benchmark_dir if results_dir is None else Path(results_dir) + filtered_input_cloud_path = results_dir / f"{Path(input_cloud_path).stem}_filtered.pcd" logger.info(f'Removing unknown points from "{input_cloud_path}" using {self.gt_octree_path}') 
removeUnknownPoints(str(input_cloud_path), str(self.gt_octree_path), str(filtered_input_cloud_path)) input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(filtered_input_cloud_path)).points) gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).points) - recon_metrics = get_recon_metrics_multi_thresholds(input_cloud_np, gt_cloud_np, thresholds=[0.02, 0.05, 0.1]) - error_csv_file = filtered_input_cloud_path.with_name(f"{filtered_input_cloud_path.stem}_metrics.csv") + recon_metrics = get_recon_metrics_multi_thresholds(input_cloud_np, gt_cloud_np, thresholds=recon_thresholds) + error_csv_file = results_dir / f"{Path(input_cloud_path).stem}_metrics.csv" with open(error_csv_file, mode="w", newline="") as f: writer = csv.DictWriter(f, fieldnames=recon_metrics[1].keys()) writer.writeheader() @@ -228,7 +230,7 @@ def evaluate_reconstruction(self, input_cloud_path): writer.writeheader() writer.writerow(recon_metrics[0]) - error_cloud_file = filtered_input_cloud_path.with_name(f"{filtered_input_cloud_path.stem}_error.pcd") + error_cloud_file = results_dir / f"{Path(input_cloud_path).stem}_error.pcd" save_error_cloud(input_cloud_np, gt_cloud_np, str(error_cloud_file)) From ce2895f4dc6065435a5639834919e5ae76f440e9 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Sun, 13 Oct 2024 22:21:52 +0100 Subject: [PATCH 62/83] fix: rename lastest to latest --- scripts/reconstruction_benchmark/nerf.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 31308f9..216e2ec 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -80,19 +80,19 @@ def run_nerfstudio(ns_config, ns_data_config): # rename nerfacto-big or nerfacto-huge to nerfacto, splatfacto-big to splatfacto method_dir_name = ns_config["method"].replace("-big", "").replace("-huge", "") output_log_dir = Path(ns_config["output-dir"]) / folder_name / method_dir_name - lastest_output_folder = sorted([x for x in output_log_dir.glob("*") if x.is_dir()])[-1] - latest_output_config = lastest_output_folder / "config.yml" + latest_output_folder = sorted([x for x in output_log_dir.glob("*") if x.is_dir()])[-1] + latest_output_config = latest_output_folder / "config.yml" # evaluate renders - logger.info(f"Evaluating {latest_output_folder.name} from {lastest_output_folder}") - render_dir = lastest_output_folder / "renders" + logger.info(f"Evaluating {latest_output_folder.name} from {latest_output_folder}") + render_dir = latest_output_folder / "renders" run_nerfstudio_eval(latest_output_config, render_dir) logging.disable(logging.NOTSET) # export cloud export_method = "gaussian-splat" if ns_config["method"] == "splatfacto" else "pointcloud" output_cloud_file = run_nerfstudio_exporter(latest_output_config, export_method) - ns_se3, scale_matrix = load_ns_transform(lastest_output_folder) + ns_se3, scale_matrix = load_ns_transform(latest_output_folder) cloud = o3d.io.read_point_cloud(str(output_cloud_file)) cloud.transform(scale_matrix) cloud.transform(np.linalg.inv(ns_se3)) From 61b2eec5c9ed9d05461cefb026e5e706af20ba17 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 00:22:44 +0100 Subject: [PATCH 63/83] feat: not save cloud in nvs benchmark --- scripts/reconstruction_benchmark/main.py | 28 ++++++++++++++---- scripts/reconstruction_benchmark/nerf.py | 4 +++- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py
b/scripts/reconstruction_benchmark/main.py index 32f32b9..cbae05c 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -197,7 +197,12 @@ def compute_sim3(self): ns_metric_json_file.symlink_to(rescaled_colmap_traj_file) def run_nerfstudio( - self, method="nerfacto", ns_data_dir=None, json_filename="transforms_metric.json", eval_mode="fraction" + self, + method="nerfacto", + ns_data_dir=None, + json_filename="transforms_metric.json", + eval_mode="fraction", + export_cloud=True, ): ns_data_dir = self.ns_data_dir if ns_data_dir is None else Path(ns_data_dir) ns_model_dir = ns_data_dir / "trained_models" @@ -205,8 +210,9 @@ def run_nerfstudio( ns_config, ns_data_config = generate_nerfstudio_config( method, ns_data_dir / json_filename, ns_model_dir, eval_mode=eval_mode ) - final_cloud_file = run_nerfstudio(ns_config, ns_data_config) - final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) + final_cloud_file = run_nerfstudio(ns_config, ns_data_config, export_cloud) + if final_cloud_file is not None: + final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) def evaluate_reconstruction(self, input_cloud_path, results_dir=None): assert input_cloud_path.exists(), f"Input cloud not found at {input_cloud_path}" @@ -273,13 +279,13 @@ def get_args(): recon_benchmark.evaluate_reconstruction(recon_benchmark.scaled_mvs_cloud_gt_frame_file) if recon_config["run_nerfstudio"]: # undistorted_ns_dir = recon_benchmark.ns_data_dir.with_name(recon_benchmark.ns_data_dir.name + "_undistorted") - # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir) - # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir) + # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False) + # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False) + # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False) + # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False) + # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, 
export_cloud=False) + # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False) + # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False) + # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False) recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_metric.json") recon_benchmark.run_nerfstudio("splatfacto") diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 216e2ec..5f9def1 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -69,7 +69,7 @@ def update_argv(nerfstudio_config, follow_up=False): print_with_colour(" ".join(sys.argv)) -def run_nerfstudio(ns_config, ns_data_config): +def run_nerfstudio(ns_config, ns_data_config, export_cloud=True): logger.info(f"Running '{ns_config['method']}' on {ns_data_config['data']}") logging.disable(logging.DEBUG) update_argv(ns_config) @@ -90,6 +90,8 @@ def run_nerfstudio(ns_config, ns_data_config): logging.disable(logging.NOTSET) # export cloud + if not export_cloud: + return None export_method = "gaussian-splat" if ns_config["method"] == "splatfacto" else "pointcloud" output_cloud_file = run_nerfstudio_exporter(latest_output_config, export_method) ns_se3, scale_matrix = load_ns_transform(latest_output_folder) From 7ab02c94761db53c7a1947db7cafe11a6f8643a9 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 00:23:16 +0100 Subject: [PATCH 64/83] refactor: use short loop detection period in main --- scripts/reconstruction_benchmark/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index cbae05c..52aa699 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -132,6 +132,7 @@ def run_colmap(self, matcher="vocab_tree_matcher"): matcher=matcher, camera_model=camera_model, max_image_size=self.colmap_undistort_max_image_size, + loop_detection_period=5, ) export_json( self.colmap_sparse_0_folder, From 7dbf90ba27a5358a5a177ac8676eed0f606c62e8 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 00:35:52 +0100 Subject: [PATCH 65/83] refactor: add config to run novel view synthesis benchmark --- config/recon_benchmark.yaml | 3 ++- scripts/reconstruction_benchmark/main.py | 26 ++++++++++++++---------- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/config/recon_benchmark.yaml b/config/recon_benchmark.yaml index 4ccfa7c..ad44196 100644 --- a/config/recon_benchmark.yaml +++ b/config/recon_benchmark.yaml @@ -8,4 +8,5 @@ reconstruction_benchmark: run_colmap: True run_mvs: True run_mvs_evaluation: True - run_nerfstudio: True \ No newline at end of file + run_nerfstudio: True + run_novel_view_synthesis_only: True \ No newline at end of file diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 52aa699..05d767d 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -279,14 +279,18 @@ def get_args(): if recon_config["run_mvs_evaluation"]: recon_benchmark.evaluate_reconstruction(recon_benchmark.scaled_mvs_cloud_gt_frame_file) if recon_config["run_nerfstudio"]: - # 
undistorted_ns_dir = recon_benchmark.ns_data_dir.with_name(recon_benchmark.ns_data_dir.name + "_undistorted") - # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False) - # recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False) - # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False) - # recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False) - # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False) - # recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False) - # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False) - # recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False) - recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_metric.json") - recon_benchmark.run_nerfstudio("splatfacto") + if recon_config["run_novel_view_synthesis_only"]: + # fmt: off + undistorted_ns_dir = recon_benchmark.ns_data_dir.with_name(recon_benchmark.ns_data_dir.name + "_undistorted") # noqa: F401 + recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False)# noqa + recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False)# noqa + recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False)# noqa + recon_benchmark.run_nerfstudio("nerfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False)# noqa + recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False)# noqa + recon_benchmark.run_nerfstudio("splatfacto", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False)# noqa + recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train.json", eval_mode="fraction", ns_data_dir=undistorted_ns_dir, export_cloud=False)# noqa + recon_benchmark.run_nerfstudio("splatfacto-big", json_filename="transforms_train_eval.json", eval_mode="filename", ns_data_dir=undistorted_ns_dir, export_cloud=False)# noqa + # fmt: on + else: + recon_benchmark.run_nerfstudio("nerfacto", json_filename="transforms_metric.json") + recon_benchmark.run_nerfstudio("splatfacto") From de2b2b670d3b2f41ffdb1391a6e86f47dbe66381 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 11:29:23 +0100 Subject: [PATCH 66/83] refactor: replace print with logger in eval error cloud function --- oxford_spires_utils/eval.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/oxford_spires_utils/eval.py b/oxford_spires_utils/eval.py index 42e76d1..377ff09 100644 --- a/oxford_spires_utils/eval.py +++ b/oxford_spires_utils/eval.py @@ -107,7 +107,7 @@ def get_BGYR_colourmap(): test_cloud.points = o3d.utility.Vector3dVector(input_cloud) test_cloud.colors = o3d.utility.Vector3dVector(distances_cmap[:, :3]) o3d.io.write_point_cloud(save_path, test_cloud) - print(f"Error cloud saved to {save_path}") + logger.info(f"diff cloud saved to {save_path}") if __name__ == "__main__": From 409e49158ee65a22b2f450e43016ed25c88abf07 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 11:32:48 +0100 Subject: [PATCH 67/83] feat: select json based on a image folder more flexible than timestamp selection at removing some outlier images --- scripts/reconstruction_benchmark/nerf_json.py | 45 +++++++++++++++---- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/scripts/reconstruction_benchmark/nerf_json.py b/scripts/reconstruction_benchmark/nerf_json.py index 1d02513..a3f0b43 100644 --- a/scripts/reconstruction_benchmark/nerf_json.py +++ b/scripts/reconstruction_benchmark/nerf_json.py @@ -20,6 +20,23 @@ def select_json_with_time_range(json_file, start_time, end_time, save_path): pose_to_ply(nerf_pose, pose_ply_file) +def select_json_with_folder(json_file, img_folder, save_path): + nerf_json_handler = NeRFJsonHandler(json_file) + nerf_json_handler.sort_frames() + nerf_json_handler.sync_with_folder(img_folder) + nerf_json_handler.save_json(save_path) + + +def split_json_every_n(json_file, n, save_path_removed, save_path_kept): + nerf_json_handler = NeRFJsonHandler(json_file) + nerf_json_handler.sort_frames() + train_json = deepcopy(nerf_json_handler) + removed_frames = nerf_json_handler.skip_frames(8, return_removed=True) + train_json.traj["frames"] = removed_frames + train_json.save_json(save_path_removed) + nerf_json_handler.save_json(save_path_kept) + + def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, merged_json_file): json_train = NeRFJsonHandler(json_train_file) json_eval = NeRFJsonHandler(json_eval_file) @@ -37,15 +54,19 @@ def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, new_json.save_json(str(merged_json_file)) -dataset_folder = "/home/docker_dev/oxford_spires_dataset/data/roq-full" +# dataset_folder = "/home/docker_dev/oxford_spires_dataset/data/roq-full" + +# train_name = "seq_1_fountain" +# train_start_time = 1710338123.042936934 +# train_end_time = 1710338186.342030327 -train_name = "seq_1_fountain" -train_start_time = 1710338123.042936934 -train_end_time = 1710338186.342030327 +# eval_name = "seq_1_fountain_back" +# eval_start_time = 1710338353.039451086 +# eval_end_time = 1710338386.638942551 -eval_name = "seq_1_fountain_back" -eval_start_time = 1710338353.039451086 -eval_end_time = 1710338386.638942551 +dataset_folder = "/home/docker_dev/oxford_spires_dataset/data/2024-03-14-blenheim-palace-all" +train_name = "seq_5" +eval_name = "seq_1" use_undistorted_image = True @@ -54,13 +75,18 @@ def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, if use_undistorted_image: colmap_folder = colmap_folder / "dense" json_file = colmap_folder / "transforms.json" +train_image_folder = dataset_folder / "train_val_image" / "train" +eval_image_folder = dataset_folder / "train_val_image" / "eval" train_save_path = Path(json_file).parent / f"{train_name}.json" eval_save_path = Path(json_file).parent / f"{eval_name}.json" -select_json_with_time_range(json_file, 
train_start_time, train_end_time, train_save_path) -select_json_with_time_range(json_file, eval_start_time, eval_end_time, eval_save_path) +select_json_with_folder(json_file, train_image_folder, train_save_path) +select_json_with_folder(json_file, eval_image_folder, eval_save_path) +# select_json_with_time_range(json_file, train_start_time, train_end_time, train_save_path) +# select_json_with_time_range(json_file, eval_start_time, eval_end_time, eval_save_path) +# split_json_every_n(train_save_path, n=8, save_path_kept = train_save_path, save_path_removed image_dir = dataset_folder / "images" if not use_undistorted_image else colmap_folder / "images" json_train_file = colmap_folder / (train_name + ".json") json_eval_file = colmap_folder / (eval_name + ".json") @@ -70,4 +96,5 @@ def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, # create json with the new train/eval prefix ns_dir = dataset_folder / "outputs" / "nerfstudio" / (dataset_folder.stem + "_undistorted") +print(f"Creating NeRF Studio directory at {ns_dir}") create_nerfstudio_dir(colmap_folder, ns_dir, new_image_dir) From 44fe4e7dc9e2abf723660d53d25b77b8fa4861a8 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 11:33:57 +0100 Subject: [PATCH 68/83] refactor: remove empty file --- scripts/reconstruction_benchmark/lidar_cloud_eval.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 scripts/reconstruction_benchmark/lidar_cloud_eval.py diff --git a/scripts/reconstruction_benchmark/lidar_cloud_eval.py b/scripts/reconstruction_benchmark/lidar_cloud_eval.py deleted file mode 100644 index e69de29..0000000 From cc8f70169f32e37c870f6f1673929041a1ad04a4 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 11:35:53 +0100 Subject: [PATCH 69/83] refactor: use 1cm as default voxel size --- scripts/reconstruction_benchmark/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 05d767d..a7cab0a 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -60,7 +60,7 @@ def __init__(self, project_folder, gt_folder, sensor): self.recon_benchmark_dir.mkdir(exist_ok=True, parents=True) # TODO: check lidar cloud folder has viewpoints and is pcd, check gt folder is pcd, check image folder is jpg/png self.octomap_resolution = 0.1 - self.cloud_downsample_voxel_size = 0.05 + self.cloud_downsample_voxel_size = 0.01 self.gt_octree_path = self.recon_benchmark_dir / "gt_cloud.bt" self.gt_cloud_merged_path = self.recon_benchmark_dir / "gt_cloud_merged.pcd" self.gt_cloud_individual_e57_folder = self.gt_folder / "individual_cloud_e57" From 38c64f1795684ed7095a5bb78dfe5cfa96bdff38 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 11:36:16 +0100 Subject: [PATCH 70/83] refactor: use largest undistorted image size by default --- scripts/reconstruction_benchmark/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index a7cab0a..6e9f495 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -72,7 +72,7 @@ def __init__(self, project_folder, gt_folder, sensor): self.mvs_output_folder = self.output_folder / "mvs" self.mvs_output_folder.mkdir(exist_ok=True, parents=True) self.scaled_mvs_cloud_gt_frame_file = self.recon_benchmark_dir / "OpenMVS_dense_cloud_gt_frame.pcd" - 
self.colmap_undistort_max_image_size = 1000 + self.colmap_undistort_max_image_size = -1 self.ns_data_dir = self.output_folder / "nerfstudio" / self.project_folder.name self.metric_json_filename = "transforms_metric.json" From 687cc866afd27439b26f2c9051b4dce545390141 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 11:37:04 +0100 Subject: [PATCH 71/83] refactor: remove lidar occ evaluation octomap voxel size too large and it is less accurate --- scripts/reconstruction_benchmark/main.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 6e9f495..01c1b35 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -270,7 +270,6 @@ def get_args(): recon_benchmark.process_lidar_clouds() if recon_config["run_lidar_cloud_evaluation"]: recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_cloud_merged_path) - recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_occ_benchmark_file) if recon_config["run_colmap"]: recon_benchmark.run_colmap("sequential_matcher") if recon_config["run_mvs"]: From f11af1b1afef4ea845e3f80bbd58ae3c07e5ee03 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 17:13:37 +0100 Subject: [PATCH 72/83] feat: evaluate nerfacto cloud --- scripts/reconstruction_benchmark/main.py | 4 +++- scripts/reconstruction_benchmark/nerf.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 01c1b35..a4a6bce 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -213,7 +213,9 @@ def run_nerfstudio( ) final_cloud_file = run_nerfstudio(ns_config, ns_data_config, export_cloud) if final_cloud_file is not None: - final_cloud_file.rename(self.recon_benchmark_dir / final_cloud_file.name) + new_final_cloud_file = self.recon_benchmark_dir / final_cloud_file.name + final_cloud_file.rename(new_final_cloud_file) + self.evaluate_reconstruction(new_final_cloud_file) def evaluate_reconstruction(self, input_cloud_path, results_dir=None): assert input_cloud_path.exists(), f"Input cloud not found at {input_cloud_path}" diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 5f9def1..27d616c 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -98,7 +98,7 @@ def run_nerfstudio(ns_config, ns_data_config, export_cloud=True): cloud = o3d.io.read_point_cloud(str(output_cloud_file)) cloud.transform(scale_matrix) cloud.transform(np.linalg.inv(ns_se3)) - final_metric_cloud_file = output_cloud_file.with_name(f'{ns_config["method"]}_cloud_metric.ply') + final_metric_cloud_file = output_cloud_file.with_name(f'{ns_config["method"]}_cloud_metric.pcd') o3d.io.write_point_cloud(str(final_metric_cloud_file), cloud) return final_metric_cloud_file From ed203616063823b9391eb1f87a3860bf672270b7 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 17:16:32 +0100 Subject: [PATCH 73/83] refactor: move cloud se3 transform to point_cloud.py --- oxford_spires_utils/point_cloud.py | 10 ++++++++++ scripts/reconstruction_benchmark/main.py | 4 ++-- scripts/reconstruction_benchmark/mvs.py | 7 ------- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/oxford_spires_utils/point_cloud.py b/oxford_spires_utils/point_cloud.py index e194f25..5abc9ae 100644 --- a/oxford_spires_utils/point_cloud.py +++ 
b/oxford_spires_utils/point_cloud.py @@ -174,3 +174,13 @@ def convert_e57_to_pcd(e57_file_path, pcd_file_path, check_output=True, pcd_lib= if has_colour: colours_np = np.array(saved_cloud.colors) assert np.allclose(colours_np, colours / 255, rtol=1e-5, atol=1e-8) + + +def transform_cloud_to_gt_frame(cloud_file, se3_matrix, output_cloud_file): + assert str(cloud_file).endswith(".pcd") or str(cloud_file).endswith(".ply") + assert is_se3_matrix(se3_matrix)[0], is_se3_matrix(se3_matrix)[1] + assert str(output_cloud_file).endswith(".pcd") or str(output_cloud_file).endswith(".ply") + cloud = o3d.io.read_point_cloud(str(cloud_file)) + cloud.transform(se3_matrix) + o3d.io.write_point_cloud(str(output_cloud_file), cloud) + logger.info(f"Transformed point cloud to the ground truth frame and saved as {output_cloud_file}") diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index a4a6bce..0356a3f 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -9,13 +9,13 @@ import numpy as np import open3d as o3d import yaml -from mvs import rescale_openmvs_cloud, run_openmvs, transform_cloud_to_gt_frame +from mvs import rescale_openmvs_cloud, run_openmvs from nerf import create_nerfstudio_dir, generate_nerfstudio_config, run_nerfstudio from sfm import export_json, rescale_colmap_json, run_colmap from oxford_spires_utils.bash_command import print_with_colour from oxford_spires_utils.eval import get_recon_metrics_multi_thresholds, save_error_cloud -from oxford_spires_utils.point_cloud import merge_downsample_vilens_slam_clouds +from oxford_spires_utils.point_cloud import merge_downsample_vilens_slam_clouds, transform_cloud_to_gt_frame from oxford_spires_utils.se3 import is_se3_matrix from oxford_spires_utils.sensor import Sensor from oxford_spires_utils.trajectory.align import align diff --git a/scripts/reconstruction_benchmark/mvs.py b/scripts/reconstruction_benchmark/mvs.py index 86e9c87..9f0096c 100644 --- a/scripts/reconstruction_benchmark/mvs.py +++ b/scripts/reconstruction_benchmark/mvs.py @@ -123,13 +123,6 @@ def rescale_openmvs_cloud(original_cloud_file, sim3_matrix, output_cloud_file): logger.info(f"Rescaled OpenMVS point cloud to to metric and save as {output_cloud_file}") -def transform_cloud_to_gt_frame(cloud_file, se3_matrix, output_cloud_file): - cloud = o3d.io.read_point_cloud(str(cloud_file)) - cloud.transform(se3_matrix) - o3d.io.write_point_cloud(str(output_cloud_file), cloud) - logger.info(f"Transformed point cloud to the ground truth frame and saved as {output_cloud_file}") - - if __name__ == "__main__": image_path = "/home/yifu/data/nerf_data_pipeline/2024-03-13-maths_1/raw" colmap_output_path = "/home/yifu/data/nerf_data_pipeline/2024-03-13-maths_1/processed/output_colmap" From 022d14dad4e7ef066fb83c0c69afa75a1a189b81 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 17:18:29 +0100 Subject: [PATCH 74/83] refactor: rename cloud transform function --- oxford_spires_utils/point_cloud.py | 4 ++-- scripts/reconstruction_benchmark/main.py | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/oxford_spires_utils/point_cloud.py b/oxford_spires_utils/point_cloud.py index 5abc9ae..1c75280 100644 --- a/oxford_spires_utils/point_cloud.py +++ b/oxford_spires_utils/point_cloud.py @@ -176,11 +176,11 @@ def convert_e57_to_pcd(e57_file_path, pcd_file_path, check_output=True, pcd_lib= assert np.allclose(colours_np, colours / 255, rtol=1e-5, atol=1e-8) -def 
transform_cloud_to_gt_frame(cloud_file, se3_matrix, output_cloud_file): +def transform_cloud_with_se3(cloud_file, se3_matrix, output_cloud_file): assert str(cloud_file).endswith(".pcd") or str(cloud_file).endswith(".ply") assert is_se3_matrix(se3_matrix)[0], is_se3_matrix(se3_matrix)[1] assert str(output_cloud_file).endswith(".pcd") or str(output_cloud_file).endswith(".ply") cloud = o3d.io.read_point_cloud(str(cloud_file)) cloud.transform(se3_matrix) o3d.io.write_point_cloud(str(output_cloud_file), cloud) - logger.info(f"Transformed point cloud to the ground truth frame and saved as {output_cloud_file}") + logger.info(f"Transformed point cloud with SE(3) and saved as {output_cloud_file}") diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 0356a3f..c09748c 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -15,7 +15,7 @@ from oxford_spires_utils.bash_command import print_with_colour from oxford_spires_utils.eval import get_recon_metrics_multi_thresholds, save_error_cloud -from oxford_spires_utils.point_cloud import merge_downsample_vilens_slam_clouds, transform_cloud_to_gt_frame +from oxford_spires_utils.point_cloud import merge_downsample_vilens_slam_clouds, transform_cloud_with_se3 from oxford_spires_utils.se3 import is_se3_matrix from oxford_spires_utils.sensor import Sensor from oxford_spires_utils.trajectory.align import align @@ -190,9 +190,7 @@ def compute_sim3(self): mvs_cloud_file = self.mvs_output_folder / "scene_dense_nerf_world.ply" self.scaled_mvs_cloud_file = self.mvs_output_folder / "OpenMVS_dense_cloud_metric.pcd" rescale_openmvs_cloud(mvs_cloud_file, T_lidar_colmap, self.scaled_mvs_cloud_file) - transform_cloud_to_gt_frame( - self.scaled_mvs_cloud_file, self.transform_matrix, self.scaled_mvs_cloud_gt_frame_file - ) + transform_cloud_with_se3(self.scaled_mvs_cloud_file, self.transform_matrix, self.scaled_mvs_cloud_gt_frame_file) ns_metric_json_file = self.ns_data_dir / self.metric_json_filename ns_metric_json_file.unlink(missing_ok=True) ns_metric_json_file.symlink_to(rescaled_colmap_traj_file) From 2bb9f8032d8a9fe0375c540500bb37511d3800f6 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 22:13:20 +0100 Subject: [PATCH 75/83] refactor: move se3 transform out from sim3 function --- config/recon_benchmark.yaml | 1 + scripts/reconstruction_benchmark/main.py | 20 +++++++++++--------- scripts/reconstruction_benchmark/mvs.py | 4 +++- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/config/recon_benchmark.yaml b/config/recon_benchmark.yaml index ad44196..fd130eb 100644 --- a/config/recon_benchmark.yaml +++ b/config/recon_benchmark.yaml @@ -6,6 +6,7 @@ reconstruction_benchmark: run_lidar_cloud_processing: True run_lidar_cloud_evaluation: True run_colmap: True + run_colmap_sim3: True run_mvs: True run_mvs_evaluation: True run_nerfstudio: True diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index c09748c..b5b6d31 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -157,15 +157,19 @@ def run_openmvs(self): num_sparse_folders = len(list(self.colmap_sparse_folder.glob("*"))) if num_sparse_folders > 1: print_with_colour(f"Multiple sparse folders found in {self.colmap_output_folder}. 
Using the first one.") - run_openmvs( + assert self.transform_matrix is not None, "Ground truth to lidar transform not loaded" + mvs_cloud_file = run_openmvs( self.image_folder, self.colmap_output_folder, self.colmap_sparse_0_folder, self.mvs_output_folder, self.openmvs_bin, ) + scaled_mvs_cloud_file = self.mvs_output_folder / "OpenMVS_dense_cloud_metric.pcd" + rescale_openmvs_cloud(mvs_cloud_file, T_lidar_colmap, scaled_mvs_cloud_file) + transform_cloud_with_se3(scaled_mvs_cloud_file, self.transform_matrix, self.scaled_mvs_cloud_gt_frame_file) - def compute_sim3(self): + def compute_colmap_sim3(self): colmap_traj_file = self.colmap_output_folder / "transforms.json" rescaled_colmap_traj_file = self.colmap_output_folder / self.metric_json_filename # TODO refactor lidar_slam_traj = VilensSlamTrajReader(self.lidar_slam_traj_file).read_file() @@ -183,14 +187,11 @@ def compute_sim3(self): lidar_slam_traj_cam_frame = deepcopy(lidar_slam_traj) lidar_slam_traj_cam_frame.transform(T_base_cam, right_mul=True) # T_lidar_colmap = align(lidar_slam_traj, colmap_traj_single_cam, self.colmap_output_folder) - T_lidar_colmap = align(lidar_slam_traj_cam_frame, colmap_traj_single_cam, self.colmap_output_folder) - rescale_colmap_json(colmap_traj_file, T_lidar_colmap, rescaled_colmap_traj_file) + self.T_lidar_colmap = align(lidar_slam_traj_cam_frame, colmap_traj_single_cam, self.colmap_output_folder) + rescale_colmap_json(colmap_traj_file, self.T_lidar_colmap, rescaled_colmap_traj_file) rescaled_colmap_traj = NeRFTrajReader(rescaled_colmap_traj_file).read_file() pose_to_ply(rescaled_colmap_traj, self.colmap_output_folder / "rescaled_colmap_traj.ply", [0.0, 1.0, 0.0]) - mvs_cloud_file = self.mvs_output_folder / "scene_dense_nerf_world.ply" - self.scaled_mvs_cloud_file = self.mvs_output_folder / "OpenMVS_dense_cloud_metric.pcd" - rescale_openmvs_cloud(mvs_cloud_file, T_lidar_colmap, self.scaled_mvs_cloud_file) - transform_cloud_with_se3(self.scaled_mvs_cloud_file, self.transform_matrix, self.scaled_mvs_cloud_gt_frame_file) + ns_metric_json_file = self.ns_data_dir / self.metric_json_filename ns_metric_json_file.unlink(missing_ok=True) ns_metric_json_file.symlink_to(rescaled_colmap_traj_file) @@ -272,9 +273,10 @@ def get_args(): recon_benchmark.evaluate_reconstruction(recon_benchmark.lidar_cloud_merged_path) if recon_config["run_colmap"]: recon_benchmark.run_colmap("sequential_matcher") + if recon_config["run_colmap_sim3"]: + recon_benchmark.compute_colmap_sim3() if recon_config["run_mvs"]: recon_benchmark.run_openmvs() - recon_benchmark.compute_sim3() if recon_config["run_mvs_evaluation"]: recon_benchmark.evaluate_reconstruction(recon_benchmark.scaled_mvs_cloud_gt_frame_file) if recon_config["run_nerfstudio"]: diff --git a/scripts/reconstruction_benchmark/mvs.py b/scripts/reconstruction_benchmark/mvs.py index 9f0096c..9a3aade 100644 --- a/scripts/reconstruction_benchmark/mvs.py +++ b/scripts/reconstruction_benchmark/mvs.py @@ -82,8 +82,10 @@ def run_openmvs(image_path, colmap_output_path, sparse_folder, mvs_dir, openmvs_ logger.error(f"Failed to generate dense point cloud at {output_ply_file}") dense_ply = o3d.io.read_point_cloud(str(output_ply_file)) dense_ply.transform(colmap_to_nerf_world_transform) - o3d.io.write_point_cloud(str(output_ply_file.with_name("scene_dense_nerf_world.ply")), dense_ply) + output_file = output_ply_file.with_name("scene_dense_nerf_world.pcd") + o3d.io.write_point_cloud(str(output_file), dense_ply) logger.info("Transformed MVS point cloud to the world frame defined by the nerf 
convention") + return output_file # Reconstruct the mesh # reconstruct_cmd = [f"{openmvs_bin}/ReconstructMesh", "scene_dense.mvs", "-p scene_dense.ply", f"-w {mvs_dir}"] # run_command(" ".join(reconstruct_cmd), print_command=True) From 34619bfb0e5d84765fdbbeae0e5d540bd2ab235e Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Tue, 15 Oct 2024 22:13:47 +0100 Subject: [PATCH 76/83] feat: move nerfstudio cloud to gt frame with se3 --- scripts/reconstruction_benchmark/main.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index b5b6d31..ed3c724 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -210,11 +210,11 @@ def run_nerfstudio( ns_config, ns_data_config = generate_nerfstudio_config( method, ns_data_dir / json_filename, ns_model_dir, eval_mode=eval_mode ) - final_cloud_file = run_nerfstudio(ns_config, ns_data_config, export_cloud) - if final_cloud_file is not None: - new_final_cloud_file = self.recon_benchmark_dir / final_cloud_file.name - final_cloud_file.rename(new_final_cloud_file) - self.evaluate_reconstruction(new_final_cloud_file) + metric_cloud_file = run_nerfstudio(ns_config, ns_data_config, export_cloud) + if metric_cloud_file is not None: + metric_cloud_gt_frame = self.recon_benchmark_dir / (metric_cloud_file.stem + "_gt_frame.pcd") + transform_cloud_with_se3(metric_cloud_file, self.transform_matrix, metric_cloud_gt_frame) + self.evaluate_reconstruction(metric_cloud_gt_frame) def evaluate_reconstruction(self, input_cloud_path, results_dir=None): assert input_cloud_path.exists(), f"Input cloud not found at {input_cloud_path}" From 77b48e87e80bc4e86640f86a11b16f9bd9a1bae3 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 16 Oct 2024 01:12:58 +0100 Subject: [PATCH 77/83] refactor: check if image folder exist in train_eval --- scripts/reconstruction_benchmark/nerf_json.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/reconstruction_benchmark/nerf_json.py b/scripts/reconstruction_benchmark/nerf_json.py index a3f0b43..d552f8b 100644 --- a/scripts/reconstruction_benchmark/nerf_json.py +++ b/scripts/reconstruction_benchmark/nerf_json.py @@ -78,6 +78,9 @@ def merge_json_files(json_train_file, json_eval_file, image_dir, new_image_dir, train_image_folder = dataset_folder / "train_val_image" / "train" eval_image_folder = dataset_folder / "train_val_image" / "eval" +assert (train_image_folder / "images").exists(), f"{train_image_folder/'images'} does not exist" +assert (eval_image_folder / "images").exists(), f"{eval_image_folder/'images'} does not exist" + train_save_path = Path(json_file).parent / f"{train_name}.json" eval_save_path = Path(json_file).parent / f"{eval_name}.json" select_json_with_folder(json_file, train_image_folder, train_save_path) From a8c651ec65304fb10c61768b92e0b30329e5b411 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 16 Oct 2024 14:10:05 +0100 Subject: [PATCH 78/83] refactor: downsample input cloud in recon eval --- scripts/reconstruction_benchmark/main.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index ed3c724..5116611 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -225,7 +225,12 @@ def evaluate_reconstruction(self, input_cloud_path, results_dir=None): filtered_input_cloud_path = results_dir / 
f"{Path(input_cloud_path).stem}_filtered.pcd" logger.info(f'Removing unknown points from "{input_cloud_path}" using {self.gt_octree_path}') removeUnknownPoints(str(input_cloud_path), str(self.gt_octree_path), str(filtered_input_cloud_path)) - input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(filtered_input_cloud_path)).points) + logger.info(f"Downsampling filtered cloud to {self.cloud_downsample_voxel_size} m") + input_cloud = o3d.io.read_point_cloud(str(filtered_input_cloud_path)) + input_cloud_downsampled = input_cloud.voxel_down_sample(voxel_size=self.cloud_downsample_voxel_size) + input_cloud_downsampled_path = results_dir / (filtered_input_cloud_path.stem + "_downsampled.pcd") + o3d.io.write_point_cloud(str(input_cloud_downsampled_path), input_cloud_downsampled) + input_cloud_np = np.asarray(input_cloud_downsampled.points) gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).points) recon_metrics = get_recon_metrics_multi_thresholds(input_cloud_np, gt_cloud_np, thresholds=recon_thresholds) error_csv_file = results_dir / f"{Path(input_cloud_path).stem}_metrics.csv" From ca3d95be98e63f400e18c7989ebf9db5ed04547c Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 16 Oct 2024 15:01:33 +0100 Subject: [PATCH 79/83] refactor: downsample before filtering --- scripts/reconstruction_benchmark/main.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index 5116611..fbb6482 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -222,15 +222,16 @@ def evaluate_reconstruction(self, input_cloud_path, results_dir=None): assert self.gt_octree_path.exists(), f"Ground truth octree not found at {self.gt_octree_path}" recon_thresholds = [0.03, 0.05, 0.1, 0.2] results_dir = self.recon_benchmark_dir if results_dir is None else Path(results_dir) - filtered_input_cloud_path = results_dir / f"{Path(input_cloud_path).stem}_filtered.pcd" - logger.info(f'Removing unknown points from "{input_cloud_path}" using {self.gt_octree_path}') - removeUnknownPoints(str(input_cloud_path), str(self.gt_octree_path), str(filtered_input_cloud_path)) logger.info(f"Downsampling filtered cloud to {self.cloud_downsample_voxel_size} m") - input_cloud = o3d.io.read_point_cloud(str(filtered_input_cloud_path)) + input_cloud = o3d.io.read_point_cloud(str(input_cloud_path)) input_cloud_downsampled = input_cloud.voxel_down_sample(voxel_size=self.cloud_downsample_voxel_size) - input_cloud_downsampled_path = results_dir / (filtered_input_cloud_path.stem + "_downsampled.pcd") + input_cloud_downsampled_path = results_dir / (input_cloud_path.stem + "_downsampled.pcd") o3d.io.write_point_cloud(str(input_cloud_downsampled_path), input_cloud_downsampled) - input_cloud_np = np.asarray(input_cloud_downsampled.points) + + logger.info(f"Removing unknown points using {self.gt_octree_path}") + filtered_input_cloud_path = results_dir / f"{Path(input_cloud_downsampled_path).stem}_filtered.pcd" + removeUnknownPoints(str(input_cloud_downsampled_path), str(self.gt_octree_path), str(filtered_input_cloud_path)) + input_cloud_np = np.asarray(o3d.io.read_point_cloud(str(filtered_input_cloud_path)).points) gt_cloud_np = np.asarray(o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).points) recon_metrics = get_recon_metrics_multi_thresholds(input_cloud_np, gt_cloud_np, thresholds=recon_thresholds) error_csv_file = results_dir / 
f"{Path(input_cloud_path).stem}_metrics.csv" From f165b28cdbf38e0ad9d2c418cd1cedc501a58421 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Wed, 16 Oct 2024 15:07:25 +0100 Subject: [PATCH 80/83] feat: crop input cloud using gt bbox for nerfacto sky clouds --- scripts/reconstruction_benchmark/main.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/reconstruction_benchmark/main.py b/scripts/reconstruction_benchmark/main.py index fbb6482..dfb6258 100644 --- a/scripts/reconstruction_benchmark/main.py +++ b/scripts/reconstruction_benchmark/main.py @@ -222,8 +222,11 @@ def evaluate_reconstruction(self, input_cloud_path, results_dir=None): assert self.gt_octree_path.exists(), f"Ground truth octree not found at {self.gt_octree_path}" recon_thresholds = [0.03, 0.05, 0.1, 0.2] results_dir = self.recon_benchmark_dir if results_dir is None else Path(results_dir) - logger.info(f"Downsampling filtered cloud to {self.cloud_downsample_voxel_size} m") input_cloud = o3d.io.read_point_cloud(str(input_cloud_path)) + logger.info(f"Cropping input cloud using bounding box from {self.gt_cloud_merged_path}") + gt_bbox = o3d.io.read_point_cloud(str(self.gt_cloud_merged_path)).get_axis_aligned_bounding_box() + input_cloud = input_cloud.crop(gt_bbox) + logger.info(f"Downsampling filtered cloud to {self.cloud_downsample_voxel_size} m") input_cloud_downsampled = input_cloud.voxel_down_sample(voxel_size=self.cloud_downsample_voxel_size) input_cloud_downsampled_path = results_dir / (input_cloud_path.stem + "_downsampled.pcd") o3d.io.write_point_cloud(str(input_cloud_downsampled_path), input_cloud_downsampled) From 48c35d9f2df7de126aeb17520bb8f79022cd0085 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Thu, 17 Oct 2024 03:52:10 +0100 Subject: [PATCH 81/83] feat: filter large dist to remove outliers in nerfacto --- oxford_spires_utils/eval.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/oxford_spires_utils/eval.py b/oxford_spires_utils/eval.py index 377ff09..86bdab2 100644 --- a/oxford_spires_utils/eval.py +++ b/oxford_spires_utils/eval.py @@ -46,9 +46,7 @@ def get_recon_metrics( def get_recon_metrics_multi_thresholds( - input_cloud: np.ndarray, - gt_cloud: np.ndarray, - thresholds: list = [0.02, 0.05, 0.1], + input_cloud: np.ndarray, gt_cloud: np.ndarray, thresholds: list = [0.02, 0.05, 0.1], max_distance=2.0 ): assert isinstance(input_cloud, np.ndarray) and isinstance(gt_cloud, np.ndarray) assert input_cloud.shape[1] == 3 and gt_cloud.shape[1] == 3 @@ -56,10 +54,12 @@ def get_recon_metrics_multi_thresholds( logger.info("Computing Accuracy and Precision ...") input_to_gt_dist = compute_p2p_distance(input_cloud, gt_cloud) + input_to_gt_dist = input_to_gt_dist[input_to_gt_dist <= max_distance] accuracy = np.mean(input_to_gt_dist) logger.info("Computing Completeness and Recall ...") gt_to_input_dist = compute_p2p_distance(gt_cloud, input_cloud) + gt_to_input_dist = gt_to_input_dist[gt_to_input_dist <= max_distance] completeness = np.mean(gt_to_input_dist) logger.info(f"Accuracy: {accuracy:.4f}, Completeness: {completeness:.4f}") @@ -80,7 +80,7 @@ def get_recon_metrics_multi_thresholds( return results -def save_error_cloud(input_cloud: np.ndarray, reference_cloud: np.ndarray, save_path, cmap="bgyr"): +def save_error_cloud(input_cloud: np.ndarray, reference_cloud: np.ndarray, save_path, cmap="bgyr", max_distance=2.0): def get_BGYR_colourmap(): colours = [ (0, 0, 255), # Blue @@ -96,6 +96,8 @@ def get_BGYR_colourmap(): return cmap distances = 
compute_p2p_distance(input_cloud, reference_cloud) + input_cloud = input_cloud[distances <= max_distance] + distances = distances[distances <= max_distance] distances = np.clip(distances, 0, 1) if cmap == "bgyr": cmap = get_BGYR_colourmap() From 0f258b38d5f50f14f15e18fb2288c604d5bc4052 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Mon, 28 Oct 2024 09:34:52 +0000 Subject: [PATCH 82/83] refactor: add number of points of nerf cloud export --- scripts/reconstruction_benchmark/nerf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/reconstruction_benchmark/nerf.py b/scripts/reconstruction_benchmark/nerf.py index 27d616c..d8a68c2 100644 --- a/scripts/reconstruction_benchmark/nerf.py +++ b/scripts/reconstruction_benchmark/nerf.py @@ -121,6 +121,7 @@ def run_nerfstudio_exporter(config_file, export_method): "method": export_method, "load-config": config_file, "output-dir": config_file.parent, + "num-points": 100000000, } if export_method == "pointcloud": exporter_config["normal-method"] = "open3d" From 5371fe943d5e8e933a8c68168c6cd539b1ff1e07 Mon Sep 17 00:00:00 2001 From: Yifu Tao Date: Mon, 28 Oct 2024 09:35:37 +0000 Subject: [PATCH 83/83] fix: add missing import --- oxford_spires_utils/point_cloud.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/oxford_spires_utils/point_cloud.py b/oxford_spires_utils/point_cloud.py index 1c75280..59782b1 100644 --- a/oxford_spires_utils/point_cloud.py +++ b/oxford_spires_utils/point_cloud.py @@ -1,3 +1,4 @@ +import logging from pathlib import Path import numpy as np @@ -9,6 +10,8 @@ from oxford_spires_utils.se3 import is_se3_matrix, xyz_quat_xyzw_to_se3_matrix +logger = logging.getLogger(__name__) + def transform_3d_cloud(cloud_np, transform_matrix): """Apply a transformation to the point cloud."""
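A closing note on the evaluation flow assembled in patches 78-81: the reconstruction is cropped to the ground-truth bounding box, voxel-downsampled, stripped of points that fall in unknown space of the ground-truth octree, and finally point-to-point distances above max_distance are discarded so that stray nerfacto sky points do not dominate the averages. The underlying compute_p2p_distance is not shown here, so the sketch below is only one common way such threshold metrics can be computed, using scipy's cKDTree for nearest-neighbour distances; the helper name and the F1 combination are assumptions for illustration, not the repository's exact implementation.

import numpy as np
from scipy.spatial import cKDTree


def recon_metrics_sketch(input_pts, gt_pts, thresholds=(0.03, 0.05, 0.1, 0.2), max_distance=2.0):
    # Nearest-neighbour distance from each input point to the ground truth, and vice versa.
    input_to_gt = cKDTree(gt_pts).query(input_pts)[0]
    gt_to_input = cKDTree(input_pts).query(gt_pts)[0]
    # Drop very large distances so outliers (e.g. residual sky points) do not skew the means,
    # mirroring the max_distance cut added in patch 81.
    input_to_gt = input_to_gt[input_to_gt <= max_distance]
    gt_to_input = gt_to_input[gt_to_input <= max_distance]
    results = {"accuracy": float(np.mean(input_to_gt)), "completeness": float(np.mean(gt_to_input))}
    for t in thresholds:
        precision = float(np.mean(input_to_gt <= t))  # fraction of input points within t of the GT
        recall = float(np.mean(gt_to_input <= t))     # fraction of GT points within t of the input
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
        results[t] = {"precision": precision, "recall": recall, "f1": f1}
    return results

The default thresholds mirror the recon_thresholds values (0.03, 0.05, 0.1, 0.2 m) used in main.py above; whether precision and recall are combined into an F1 score in exactly this way is an assumption.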