[DRAFT] Mip-Splatting implementation #274

Open · wants to merge 5 commits into main
93 changes: 93 additions & 0 deletions examples/benchmark_mipnerf360.py
@@ -0,0 +1,93 @@
# Training script for the Mip-NeRF 360 dataset

import os
import GPUtil
from concurrent.futures import ThreadPoolExecutor
import time
import glob

# 9 scenes
# scenes = ["bicycle", "bonsai", "counter", "flowers", "garden", "stump", "treehill", "kitchen", "room"]
# factors = [4, 2, 2, 4, 4, 4, 4, 2, 2]

# 7 scenes
scenes = ["bicycle", "bonsai", "counter", "garden", "stump", "kitchen", "room"]
factors = [4, 2, 2, 4, 4, 2, 2]

excluded_gpus = set([])

result_dir = "results/benchmark_mipsplatting_cuda3D"

dry_run = False

jobs = list(zip(scenes, factors))


def train_scene(gpu, scene, factor):
    # train without eval
    cmd = f"OMP_NUM_THREADS=4 CUDA_VISIBLE_DEVICES={gpu} python simple_trainer_mip_splatting.py --eval_steps -1 --disable_viewer --data_factor {factor} --data_dir data/360_v2/{scene} --result_dir {result_dir}/{scene} --antialiased --kernel_size 0.1"
    print(cmd)
    if not dry_run:
        os.system(cmd)

    # eval and render for all the ckpts
    ckpts = glob.glob(f"{result_dir}/{scene}/ckpts/*.pt")
    for ckpt in ckpts:
        cmd = f"OMP_NUM_THREADS=4 CUDA_VISIBLE_DEVICES={gpu} python simple_trainer_mip_splatting.py --disable_viewer --data_factor {factor} --data_dir data/360_v2/{scene} --result_dir {result_dir}/{scene} --ckpt {ckpt} --antialiased --kernel_size 0.1"
        print(cmd)
        if not dry_run:
            os.system(cmd)

    return True


def worker(gpu, scene, factor):
    # This worker function starts a job and returns when it's done.
    print(f"Starting job on GPU {gpu} with scene {scene}\n")
    train_scene(gpu, scene, factor)
    print(f"Finished job on GPU {gpu} with scene {scene}\n")


def dispatch_jobs(jobs, executor):
    future_to_job = {}
    reserved_gpus = set()  # GPUs that are slated for work but may not be active yet

    while jobs or future_to_job:
        # Get the list of available GPUs, not including those that are reserved.
        all_available_gpus = set(
            GPUtil.getAvailable(order="first", limit=10, maxMemory=0.1, maxLoad=0.1)
        )
        # all_available_gpus = set([0,1,2,3])
        available_gpus = list(all_available_gpus - reserved_gpus - excluded_gpus)

        # Launch new jobs on available GPUs
        while available_gpus and jobs:
            gpu = available_gpus.pop(0)
            job = jobs.pop(0)
            future = executor.submit(worker, gpu, *job)  # unpacking job as arguments to worker
            future_to_job[future] = (gpu, job)
            reserved_gpus.add(gpu)  # reserve this GPU until the job starts processing

        # Check for completed jobs and remove them from the list of running jobs.
        # Also, release the GPUs they were using.
        done_futures = [future for future in future_to_job if future.done()]
        for future in done_futures:
            job = future_to_job.pop(future)  # remove the job associated with the completed future
            gpu = job[0]  # the GPU is the first element in each job tuple
            reserved_gpus.discard(gpu)  # release this GPU
            print(f"Job {job} has finished, releasing GPU {gpu}")

        # Small delay to keep this loop from spinning very fast
        # when there are no GPUs available.
        time.sleep(5)

    print("All jobs have been processed.")


# Using ThreadPoolExecutor to manage the thread pool
with ThreadPoolExecutor(max_workers=8) as executor:
    dispatch_jobs(jobs, executor)
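
Note: the scheduler above only hands work to a GPU that GPUtil reports as under 10% load and 10% memory use. A standalone snippet (assuming only that the GPUtil package is installed) to see which GPUs currently pass that filter:

import GPUtil

# List GPUs that are at most 10% loaded and 10% memory-used,
# matching the thresholds used by dispatch_jobs above.
free_gpus = GPUtil.getAvailable(order="first", limit=10, maxMemory=0.1, maxLoad=0.1)
print(f"GPUs passing the availability filter: {free_gpus}")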
115 changes: 115 additions & 0 deletions examples/benchmark_mipnerf360_stmt.py
Collaborator: how hard would it be to merge this one with the other script? Ideally a list argument could be passed in to specify the rendering factors that the evaluation happens on.

Collaborator: I think we could adapt the current benchmark.sh script into the .py style similar to the one proposed here, allowing for easier batch jobs on e.g. clusters and also streamlining the evaluation of the various new features of gsplat... I can look into it...
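
For illustration, a minimal sketch of what that list argument could look like with tyro (which the examples already use elsewhere); the config class and field names here are hypothetical, not part of this PR:

# Hypothetical sketch of a merged benchmark CLI; names are illustrative.
from dataclasses import dataclass, field
from typing import List

import tyro


@dataclass
class BenchmarkConfig:
    # downsampling factor used for training
    train_factor: int = 8
    # rendering factors to evaluate on; [1, 2, 4, 8] reproduces the stmt setup
    eval_factors: List[int] = field(default_factory=lambda: [1, 2, 4, 8])


if __name__ == "__main__":
    cfg = tyro.cli(BenchmarkConfig)
    print(cfg.train_factor, cfg.eval_factors)

This would be invoked as, e.g., python benchmark_mipnerf360.py --eval-factors 1 2 4 8.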

@@ -0,0 +1,115 @@
# Training script for the Mip-NeRF 360 dataset
# The model is trained with downsampling factor 8 and rendered with downsampling factors 1, 2, 4, 8

import os
import GPUtil
from concurrent.futures import ThreadPoolExecutor
import time
import glob

# 9 scenes
# scenes = ["bicycle", "bonsai", "counter", "flowers", "garden", "stump", "treehill", "kitchen", "room"]
# factors = [4, 2, 2, 4, 4, 4, 4, 2, 2]

# 7 scenes
scenes = ["bicycle", "bonsai", "counter", "garden", "stump", "kitchen", "room"]
factors = [8] * len(scenes)

excluded_gpus = set([])

# classic
# result_dir = "results/benchmark_stmt"
Collaborator: what does stmt stand for? lol

maturk (Collaborator), Jul 18, 2024: I think Single-scale-Training-and-Multi-scale-Testing, the idea being you train with a fixed resolution (here a factor of 1/8 of the original image res) and test on various other resolutions (1x full res, 1/2, 1/4, 1/8).

# antialiased
# result_dir = "results/benchmark_antialiased_stmt"
# mip-splatting
result_dir = "results/benchmark_mipsplatting_stmt"

dry_run = False

jobs = list(zip(scenes, factors))


def train_scene(gpu, scene, factor):
    # train without eval
    # classic
    # cmd = f"OMP_NUM_THREADS=4 CUDA_VISIBLE_DEVICES={gpu} python simple_trainer.py --eval_steps -1 --disable_viewer --data_factor {factor} --data_dir data/360_v2/{scene} --result_dir {result_dir}/{scene}"

    # anti-aliased
    # cmd = f"OMP_NUM_THREADS=4 CUDA_VISIBLE_DEVICES={gpu} python simple_trainer.py --eval_steps -1 --disable_viewer --data_factor {factor} --data_dir data/360_v2/{scene} --result_dir {result_dir}/{scene} --antialiased"

    # mip-splatting
    cmd = f"OMP_NUM_THREADS=4 CUDA_VISIBLE_DEVICES={gpu} python simple_trainer_mip_splatting.py --eval_steps -1 --disable_viewer --data_factor {factor} --data_dir data/360_v2/{scene} --result_dir {result_dir}/{scene} --antialiased --kernel_size 0.1"
    print(cmd)
    if not dry_run:
        os.system(cmd)

    # eval and render for all the ckpts
    ckpts = glob.glob(f"{result_dir}/{scene}/ckpts/*.pt")
    for ckpt in ckpts:
        for test_factor in [1, 2, 4, 8]:
            # classic
            # cmd = f"OMP_NUM_THREADS=4 CUDA_VISIBLE_DEVICES={gpu} python simple_trainer.py --disable_viewer --data_factor {test_factor} --data_dir data/360_v2/{scene} --result_dir {result_dir}/{scene}_{test_factor} --ckpt {ckpt}"

            # anti-aliased
            # cmd = f"OMP_NUM_THREADS=4 CUDA_VISIBLE_DEVICES={gpu} python simple_trainer.py --disable_viewer --data_factor {test_factor} --data_dir data/360_v2/{scene} --result_dir {result_dir}/{scene}_{test_factor} --ckpt {ckpt} --antialiased"

            # mip-splatting
            cmd = f"OMP_NUM_THREADS=4 CUDA_VISIBLE_DEVICES={gpu} python simple_trainer_mip_splatting.py --disable_viewer --data_factor {test_factor} --data_dir data/360_v2/{scene} --result_dir {result_dir}/{scene}_{test_factor} --ckpt {ckpt} --antialiased --kernel_size 0.1"
            print(cmd)
            if not dry_run:
                os.system(cmd)

    return True


def worker(gpu, scene, factor):
    # This worker function starts a job and returns when it's done.
    print(f"Starting job on GPU {gpu} with scene {scene}\n")
    train_scene(gpu, scene, factor)
    print(f"Finished job on GPU {gpu} with scene {scene}\n")


def dispatch_jobs(jobs, executor):
    future_to_job = {}
    reserved_gpus = set()  # GPUs that are slated for work but may not be active yet

    while jobs or future_to_job:
        # Get the list of available GPUs, not including those that are reserved.
        all_available_gpus = set(
            GPUtil.getAvailable(order="first", limit=10, maxMemory=0.1, maxLoad=0.1)
        )
        # all_available_gpus = set([0,1,2,3])
        available_gpus = list(all_available_gpus - reserved_gpus - excluded_gpus)

        # Launch new jobs on available GPUs
        while available_gpus and jobs:
            gpu = available_gpus.pop(0)
            job = jobs.pop(0)
            future = executor.submit(worker, gpu, *job)  # unpacking job as arguments to worker
            future_to_job[future] = (gpu, job)
            reserved_gpus.add(gpu)  # reserve this GPU until the job starts processing
            time.sleep(2)

        # Check for completed jobs and remove them from the list of running jobs.
        # Also, release the GPUs they were using.
        done_futures = [future for future in future_to_job if future.done()]
        for future in done_futures:
            job = future_to_job.pop(future)  # remove the job associated with the completed future
            gpu = job[0]  # the GPU is the first element in each job tuple
            reserved_gpus.discard(gpu)  # release this GPU
            print(f"Job {job} has finished, releasing GPU {gpu}")

        # Small delay to keep this loop from spinning very fast
        # when there are no GPUs available.
        time.sleep(5)

    print("All jobs have been processed.")


# Using ThreadPoolExecutor to manage the thread pool
with ThreadPoolExecutor(max_workers=8) as executor:
    dispatch_jobs(jobs, executor)
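
Note: both benchmark scripts shell out via os.system with the environment variables baked into the command string. A possible alternative, sketched here with a hypothetical helper that is not part of this PR, is subprocess.run with an explicit environment, which avoids hand-built shell strings and raises on non-zero exit codes:

import os
import subprocess

# Hypothetical alternative to the os.system calls above.
def run_on_gpu(gpu, args, dry_run=False):
    env = dict(os.environ, OMP_NUM_THREADS="4", CUDA_VISIBLE_DEVICES=str(gpu))
    print(" ".join(args))
    if not dry_run:
        subprocess.run(args, env=env, check=True)  # check=True raises on failure

# e.g. run_on_gpu(0, ["python", "simple_trainer_mip_splatting.py", "--disable_viewer"])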
3 changes: 3 additions & 0 deletions examples/datasets/colmap.py
@@ -182,6 +182,7 @@ def __init__(
        self.image_names = image_names  # List[str], (num_images,)
        self.image_paths = image_paths  # List[str], (num_images,)
        self.camtoworlds = camtoworlds  # np.ndarray, (num_images, 4, 4)
        self.worldtocams = np.linalg.inv(camtoworlds)  # np.ndarray, (num_images, 4, 4)
        self.camera_ids = camera_ids  # List[int], (num_images,)
        self.Ks_dict = Ks_dict  # Dict of camera_id -> K
        self.params_dict = params_dict  # Dict of camera_id -> params
@@ -254,6 +255,7 @@ def __getitem__(self, item: int) -> Dict[str, Any]:
        K = self.parser.Ks_dict[camera_id].copy()  # undistorted K
        params = self.parser.params_dict[camera_id]
        camtoworlds = self.parser.camtoworlds[index]
        worldtocams = self.parser.worldtocams[index]

        if len(params) > 0:
            # Images are distorted. Undistort them.
@@ -277,6 +279,7 @@ def __getitem__(self, item: int) -> Dict[str, Any]:
        data = {
            "K": torch.from_numpy(K).float(),
            "camtoworld": torch.from_numpy(camtoworlds).float(),
            "worldtocam": torch.from_numpy(worldtocams).float(),
            "image": torch.from_numpy(image).float(),
            "image_id": item,  # the index of the image in the dataset
        }
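Note on the change above: np.linalg.inv batch-inverts the (num_images, 4, 4) poses. Since camera-to-world matrices are rigid SE(3) transforms, the same inverse can also be written in closed form as [R|t]^-1 = [R^T | -R^T t]; a sketch that should agree with np.linalg.inv for valid rigid inputs:

import numpy as np

def invert_se3(camtoworlds: np.ndarray) -> np.ndarray:
    # Closed-form inverse of (N, 4, 4) rigid transforms.
    Rt = np.transpose(camtoworlds[:, :3, :3], (0, 2, 1))  # R^T per pose
    t = camtoworlds[:, :3, 3:]                            # (N, 3, 1) translations
    worldtocams = np.tile(np.eye(4), (camtoworlds.shape[0], 1, 1))
    worldtocams[:, :3, :3] = Rt
    worldtocams[:, :3, 3:] = -Rt @ t
    return worldtocams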
4 changes: 1 addition & 3 deletions examples/datasets/download_dataset.py
@@ -9,9 +9,7 @@
 import tyro

 # dataset names
-dataset_names = Literal[
-    "mipnerf360",
-]
+dataset_names = Literal["mipnerf360",]

 # dataset urls
 urls = {"mipnerf360": "http://storage.googleapis.com/gresearch/refraw360/360_v2.zip"}
51 changes: 51 additions & 0 deletions examples/show_mipnerf360.py
@@ -0,0 +1,51 @@
import json
import numpy as np
import glob

# 9 scenes
# scenes = ['bicycle', 'flowers', 'garden', 'stump', 'treehill', 'room', 'counter', 'kitchen', 'bonsai']

# outdoor scenes
# scenes = scenes[:5]
# indoor scenes
# scenes = scenes[5:]

# 7 scenes
scenes = ["bicycle", "bonsai", "counter", "garden", "stump", "kitchen", "room"]

# result_dirs = ["results/benchmark"]
# result_dirs = ["results/benchmark_antialiased"]
# result_dirs = ["results/benchmark_mipsplatting"]
result_dirs = ["results/benchmark_mipsplatting_cuda3D"]

all_metrics = {"psnr": [], "ssim": [], "lpips": [], "num_GS": []}
print(result_dirs)

for scene in scenes:
    print(scene, end=" ")
    for result_dir in result_dirs:
        json_files = glob.glob(f"{result_dir}/{scene}/stats/val_step29999.json")
        for json_file in json_files:
            data = json.load(open(json_file))

            for k in ["psnr", "ssim", "lpips", "num_GS"]:
                all_metrics[k].append(data[k])
                print(f"{data[k]:.3f}", end=" ")
            print()

latex = []
for k in ["psnr", "ssim", "lpips", "num_GS"]:
    numbers = np.asarray(all_metrics[k]).mean(axis=0).tolist()
    print(numbers)
    numbers = [numbers]
    if k == "psnr":  # keys are lowercase; "PSNR" would never match
        numbers = [f"{x:.2f}" for x in numbers]
    elif k == "num_GS":
        num = numbers[0] / 1e6
        numbers = [f"{num:.2f}"]
    else:
        numbers = [f"{x:.3f}" for x in numbers]
    latex.extend(numbers)
print(" | ".join(latex))
45 changes: 45 additions & 0 deletions examples/show_mipnerf360_allscales.py
@@ -0,0 +1,45 @@
Collaborator: actually, another idea for managing these scripts is to have a mip_splatting folder under examples, in which you could have multiple scripts, without worrying about merging or conflicting with other scripts. I think this is a scalable way to manage the scripts when more things are supported in the future.

And you could also have a customized simple_trainer.py living in it, basically whatever you need to be customized. But this is less preferred, because a customized simple_trainer.py means the mip-splatting feature would be disjoint from other future features for users to play with. So ideally we merge the trainer but could put the scripts into an isolated folder.

import json

import numpy as np
import glob

# 9 scenes
# scenes = ['bicycle', 'flowers', 'garden', 'stump', 'treehill', 'room', 'counter', 'kitchen', 'bonsai']

# outdoor scenes
# scenes = scenes[:5]
# indoor scenes
# scenes = scenes[5:]

# 7 scenes
scenes = ["bicycle", "bonsai", "counter", "garden", "stump", "kitchen", "room"]

result_dirs = ["results/benchmark_stmt"]
# result_dirs = ["results/benchmark_antialiased_stmt"]
# result_dirs = ["results/benchmark_mipsplatting_stmt"]

all_metrics = {"psnr": [], "ssim": [], "lpips": [], "num_GS": []}
print(result_dirs)

for scene in scenes:
    print(scene)
    for result_dir in result_dirs:
        for scale in ["8", "4", "2", "1"]:
            json_files = glob.glob(f"{result_dir}/{scene}_{scale}/stats/val_step29999.json")
            for json_file in json_files:
                data = json.load(open(json_file))
                for k in ["psnr", "ssim", "lpips", "num_GS"]:
                    all_metrics[k].append(data[k])
                    print(f"{data[k]:.3f}", end=" ")
                print()

latex = []
for k in ["psnr", "ssim", "lpips"]:
    numbers = np.asarray(all_metrics[k]).reshape(-1, 4).mean(axis=0).tolist()
    numbers = numbers + [np.mean(numbers)]
    print(numbers)
    if k == "psnr":
        numbers = [f"{x:.2f}" for x in numbers]
    else:
        numbers = [f"{x:.3f}" for x in numbers]
    latex.extend(numbers)
print(" | ".join(latex))