From 6a1a299086f407ad5eb883df956412be5bb1985c Mon Sep 17 00:00:00 2001
From: Kacper Kania <57393775+kacperkan@users.noreply.github.com>
Date: Mon, 29 Jun 2020 21:32:52 +0200
Subject: [PATCH 1/5] Docker data processing (#1)

* Add Dockerfile and script

* Change path in the volume

* Add folder for the output data

* Add docker file and fix paths to be compatible with python3
---
 .dockerignore            |   1 +
 .gitignore               | 240 +++++++++++++++++++++++++++++++++++++++
 Dockerfile               | 145 +++++++++++++++++++++++
 README.md                |  11 ++
 data/.keep               |   0
 preprocess_data.py       |   2 +-
 process_whole_dataset.sh |  26 +++++
 7 files changed, 424 insertions(+), 1 deletion(-)
 create mode 100644 .dockerignore
 create mode 100644 .gitignore
 create mode 100644 Dockerfile
 create mode 100644 data/.keep
 create mode 100644 process_whole_dataset.sh

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..1d1fe94d
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1 @@
+Dockerfile
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..4b7892fe
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,240 @@
+# Created by https://www.toptal.com/developers/gitignore/api/pycharm+all,python,visualstudiocode
+# Edit at https://www.toptal.com/developers/gitignore?templates=pycharm+all,python,visualstudiocode
+
+### PyCharm+all ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/artifacts
+# .idea/compiler.xml
+# .idea/jarRepositories.xml
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### PyCharm+all Patch ###
+# Ignores the whole .idea folder and all .iml files
+# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
+
+.idea/
+
+# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
+
+*.iml
+modules.xml
+.idea/misc.xml
+*.ipr
+
+# Sonarlint plugin
+.idea/sonarlint
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+### VisualStudioCode ###
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+*.code-workspace
+
+### VisualStudioCode Patch ###
+# Ignore all local history of files
+.history
+
+# End of https://www.toptal.com/developers/gitignore/api/pycharm+all,python,visualstudiocode
+# data
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..849d6e49
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,145 @@
+FROM nvidia/cudagl:10.2-base-ubuntu18.04
+
+RUN apt-get update \
+    && apt-get upgrade -y \
+    && apt-get install -y \
+        git \
+        vim \
+        g++ \
+        gcc \
+        cmake
+
+WORKDIR /root
+
+
+RUN apt-get update --fix-missing \
+    && apt-get upgrade -y
+
+# Pangolin
+RUN git clone https://github.com/stevenlovegrove/Pangolin.git
+RUN apt-get install -y \
+    libgl1-mesa-dev \
+    libglew-dev \
+    ffmpeg \
+    libavcodec-dev \
+    libavutil-dev \
+    libavformat-dev \
+    libswscale-dev \
+    libavdevice-dev \
+    libdc1394-22-dev \
+    libraw1394-dev \
+    libjpeg-dev \
+    libpng-dev \
+    libtiff5-dev \
+    libopenexr-dev \
+    libeigen3-dev \
+    doxygen \
+    libpython3-all-dev \
+    libegl1-mesa-dev \
+    libwayland-dev \
+    libxkbcommon-dev \
+    wayland-protocols
+
+RUN apt-get install -y python3.7 python3-pip python3.7-dev
+RUN python3.7 -mpip install \
+    numpy \
+    pyopengl \
+    Pillow \
+    pybind11
+
+RUN cd Pangolin \
+    && git submodule init \
+    && git submodule update \
+    && mkdir build \
+    && cd build \
+    && cmake .. \
+    && cmake --build . \
+    && make install
+
+# nanoflann
+RUN apt-get install -y \
+    build-essential \
+    libgtest-dev \
+    cmake \
+    libeigen3-dev
+
+RUN git clone https://github.com/jlblancoc/nanoflann.git \
+    && cd nanoflann \
+    && mkdir build \
+    && cd build \
+    && cmake .. \
+    && make \
+    && make test \
+    && make install \
+    && cd .. \
+    && mkdir /usr/local/include/nanoflann -p \
+    && mkdir /usr/include/nanoflann -p \
+    && cp include/nanoflann.hpp /usr/local/include/nanoflann \
+    && cp include/nanoflann.hpp /usr/include/nanoflann
+
+# CLI11
+RUN git clone https://github.com/CLIUtils/CLI11.git \
+    && cd CLI11 \
+    && git submodule update --init \
+    && mkdir build \
+    && cd build \
+    && cmake .. \
+    && make install \
+    && GTEST_COLOR=1 CTEST_OUTPUT_ON_FAILURE=1 make test
+
+# variant
+RUN git clone https://github.com/mpark/variant.git \
+    && mkdir variant/build \
+    && cd variant/build \
+    && cmake .. \
+    && cmake --build . --target install
+
+# anaconda
+ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
+ENV PATH /opt/conda/bin:$PATH
+
+RUN apt-get update --fix-missing && \
+    apt-get install -y wget bzip2 ca-certificates curl git && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
+    /bin/bash ~/miniconda.sh -b -p /opt/conda && \
+    rm ~/miniconda.sh && \
+    /opt/conda/bin/conda clean -tipsy && \
+    ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
+    echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
+    echo "conda activate base" >> ~/.bashrc
+
+# main
+WORKDIR /usr/src/sdf
+
+RUN conda install -y -c pytorch \
+    pytorch \
+    torchvision \
+    cpuonly \
+    scikit-image \
+    scipy
+
+RUN conda install -c conda-forge trimesh -y
+RUN pip install plyfile
+
+COPY . .
+
+
+RUN mkdir build \
+    && cd build \
+    && cmake .. \
+    && make -j
+
+
+ENV PANGOLIN_WINDOW_URI headless://
+
+RUN mkdir data/ShapeNetCore.v2 -p
+RUN mkdir data/ShapeNetCore.v2-DeepSDF -p
+
+VOLUME /usr/src/sdf/data/ShapeNetCore.v2
+VOLUME /usr/src/sdf/data/ShapeNetCore.v2-DeepSDF
+
+
+CMD ["bash", "-c", "./process_whole_dataset.sh"]
diff --git a/README.md b/README.md
index 8c4ced53..5370ba78 100644
--- a/README.md
+++ b/README.md
@@ -153,6 +153,17 @@ Before evaluating a DeepSDF model, a second mesh preprocessing step is required
 python evaluate.py -e <experiment_directory> -d <data_directory> --split <split_filename>
 ```
 
+### Preprocessing the dataset
+Instead of installing the dependencies manually, you can use the prepared Docker image (`--gpus all` is optional; pass it only if you have an NVIDIA card):
+```bash
+$ docker build -t sdfdata .
+$ docker run --rm -it \
+    -v /ShapeNetCore.v2/:/usr/src/sdf/data/ShapeNetCore.v2/ \
+    -v /ShapeNetCore.v2-DeepSDF:/usr/src/sdf/data/ShapeNetCore.v2-DeepSDF/ \
+    --gpus all \
+    sdfdata
+```
+
 ##### Note on Table 3 from the CVPR '19 Paper
 
 Given the stochastic nature of shape reconstruction (shapes are reconstructed via gradient descent with a random initialization), reconstruction accuracy will vary across multiple reruns of the same shape. The metrics listed in Table 3 for the "chair" and "plane" are the result of performing two reconstructions of each shape and keeping the one with the lowest chamfer distance. The code as released does not support this evaluation and thus the reproduced results will likely differ from those produced in the paper. For example, our test run with the provided code produced Chamfer distance (multiplied by 10^3) mean and median of 0.157 and 0.062 respectively for the "chair" class and 0.101 and 0.044 for the "plane" class (compared to 0.204, 0.072 for chairs and 0.143, 0.036 for planes reported in the paper).
diff --git a/data/.keep b/data/.keep
new file mode 100644
index 00000000..e69de29b
diff --git a/preprocess_data.py b/preprocess_data.py
index 4c9b486f..0943a2dc 100755
--- a/preprocess_data.py
+++ b/preprocess_data.py
@@ -237,7 +237,7 @@ def append_data_source_map(data_dir, name, source):
 
         meshes_targets_and_specific_args.append(
             (
-                os.path.join(shape_dir, mesh_filename),
+                mesh_filename,
                 processed_filepath,
                 specific_args,
             )
diff --git a/process_whole_dataset.sh b/process_whole_dataset.sh
new file mode 100644
index 00000000..09a6754d
--- /dev/null
+++ b/process_whole_dataset.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+classes=( "chairs" "lamps" "planes" "sofas" "tables" )
+for cls in "${classes[@]}"; do
+    echo "Processing ${cls}"
+    python preprocess_data.py \
+        --data_dir data/ShapeNetCore.v2-DeepSDF/ \
+        --source data/ShapeNetCore.v2/ \
+        --name ShapeNetV2 \
+        --split examples/splits/sv2_${cls}_train.json \
+        --skip
+    python preprocess_data.py \
+        --data_dir data/ShapeNetCore.v2-DeepSDF/ \
+        --source data/ShapeNetCore.v2/ \
+        --name ShapeNetV2 \
+        --split examples/splits/sv2_${cls}_test.json \
+        --test \
+        --skip
+    python preprocess_data.py \
+        --data_dir data/ShapeNetCore.v2-DeepSDF/ \
+        --source data/ShapeNetCore.v2/ \
+        --name ShapeNetV2 \
+        --split examples/splits/sv2_${cls}_test.json \
+        --surface \
+        --skip
+done;

From 8569f71da1ef722a124cde8d00683e6b7960c33d Mon Sep 17 00:00:00 2001
From: Kacper Kania <57393775+kacperkan@users.noreply.github.com>
Date: Thu, 2 Jul 2020 08:50:23 +0200
Subject: [PATCH 2/5] Docker data processing (#2)

* Add Dockerfile and script

* Change path in the volume

* Add folder for the output data

* Add docker file and fix paths to be compatible with python3

From 437f19f97643b0104302fcc029c07183cfabf9c6 Mon Sep 17 00:00:00 2001
From: Kacper Kania
Date: Thu, 2 Jul 2020 09:30:46 +0200
Subject: [PATCH 3/5] Add note about quantities

---
 README.md | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/README.md b/README.md
index 5370ba78..71c99522 100644
--- a/README.md
+++ b/README.md
@@ -164,6 +164,22 @@ $ docker run --rm -it \
     sdfdata
 ```
 
+Sample counts after preprocessing, compared with the split JSON files:
+
+| class    | \# of output samples | \# of samples in the split JSON | split |
+| -------- | -------------------- | ------------------------------- | ----- |
+| 02691156 | 1512                 | 1780                            | train |
+| 02691156 | 456                  | 300                             | test  |
+| 03001627 | 3859                 | 3281                            | train |
+| 03001627 | 832                  | 779                             | test  |
+| 03636649 | 897                  | 1039                            | train |
+| 03636649 | 196                  | 213                             | test  |
+| 04256520 | 1906                 | 1628                            | train |
+| 04256520 | 378                  | 411                             | test  |
+| 04379243 | 5544                 | 4859                            | train |
+| 04379243 | 1109                 | 1216                            | test  |
+
+
 ##### Note on Table 3 from the CVPR '19 Paper
 
 Given the stochastic nature of shape reconstruction (shapes are reconstructed via gradient descent with a random initialization), reconstruction accuracy will vary across multiple reruns of the same shape. The metrics listed in Table 3 for the "chair" and "plane" are the result of performing two reconstructions of each shape and keeping the one with the lowest chamfer distance. The code as released does not support this evaluation and thus the reproduced results will likely differ from those produced in the paper. For example, our test run with the provided code produced Chamfer distance (multiplied by 10^3) mean and median of 0.157 and 0.062 respectively for the "chair" class and 0.101 and 0.044 for the "plane" class (compared to 0.204, 0.072 for chairs and 0.143, 0.036 for planes reported in the paper).
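The per-class counts in the table above can be sanity-checked against a local run by counting the `.npz` files the preprocessing writes into the output volume. A minimal sketch, assuming the mount layout from the README's `docker run` example and a `SdfSamples/<name>/<synset>` output layout (both are assumptions; adjust `DATA_DIR` to your setup):

```bash
#!/usr/bin/env bash
# Count processed SDF sample files per ShapeNet synset.
# DATA_DIR is an assumption: the default output volume from the README's
# docker run example; change it if you mounted the volume elsewhere.
DATA_DIR=data/ShapeNetCore.v2-DeepSDF/SdfSamples/ShapeNetV2

for synset in 02691156 03001627 03636649 04256520 04379243; do
    count=$(find "${DATA_DIR}/${synset}" -name '*.npz' 2>/dev/null | wc -l)
    echo "${synset}: ${count} processed samples"
done
```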
From 31a64cdc49fe202298f7af650f9d9f8966c7aa97 Mon Sep 17 00:00:00 2001
From: Kacper Kania <57393775+kacperkan@users.noreply.github.com>
Date: Sat, 11 Jul 2020 12:40:02 +0200
Subject: [PATCH 4/5] Docker data processing (#3)

* Add Dockerfile and script

* Change path in the volume

* Add folder for the output data

* Add docker file and fix paths to be compatible with python3

* Add processing of all shapes
---
 process_whole_dataset.sh | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 mode change 100644 => 100755 process_whole_dataset.sh

diff --git a/process_whole_dataset.sh b/process_whole_dataset.sh
old mode 100644
new mode 100755
index 09a6754d..548c6cd1
--- a/process_whole_dataset.sh
+++ b/process_whole_dataset.sh
@@ -9,6 +9,20 @@ for cls in "${classes[@]}"; do
         --name ShapeNetV2 \
         --split examples/splits/sv2_${cls}_train.json \
         --skip
+    python preprocess_data.py \
+        --data_dir data/ShapeNetCore.v2-DeepSDF/ \
+        --source data/ShapeNetCore.v2/ \
+        --name ShapeNetV2 \
+        --split examples/splits/sv2_${cls}_train.json \
+        --test \
+        --skip
+    python preprocess_data.py \
+        --data_dir data/ShapeNetCore.v2-DeepSDF/ \
+        --source data/ShapeNetCore.v2/ \
+        --name ShapeNetV2 \
+        --split examples/splits/sv2_${cls}_train.json \
+        --surface \
+        --skip
     python preprocess_data.py \
         --data_dir data/ShapeNetCore.v2-DeepSDF/ \
         --source data/ShapeNetCore.v2/ \

From 5b9a1b762c29709d7408383adcaede8f48688271 Mon Sep 17 00:00:00 2001
From: Kacper Kania
Date: Thu, 24 Sep 2020 09:03:29 +0200
Subject: [PATCH 5/5] Process both train and test in the same manner

---
 process_whole_dataset.sh | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/process_whole_dataset.sh b/process_whole_dataset.sh
index 09a6754d..548c6cd1 100644
--- a/process_whole_dataset.sh
+++ b/process_whole_dataset.sh
@@ -9,6 +9,20 @@ for cls in "${classes[@]}"; do
         --name ShapeNetV2 \
         --split examples/splits/sv2_${cls}_train.json \
         --skip
+    python preprocess_data.py \
+        --data_dir data/ShapeNetCore.v2-DeepSDF/ \
+        --source data/ShapeNetCore.v2/ \
+        --name ShapeNetV2 \
+        --split examples/splits/sv2_${cls}_train.json \
+        --test \
+        --skip
+    python preprocess_data.py \
+        --data_dir data/ShapeNetCore.v2-DeepSDF/ \
+        --source data/ShapeNetCore.v2/