From ec86532d6910e8e9068bc9425fb4374fdd00cbc7 Mon Sep 17 00:00:00 2001
From: Zaidhaan
Date: Mon, 22 May 2023 21:42:35 +0800
Subject: [PATCH 01/37] Fix missing cstdint header in latest gcc build (#513)

Signed-off-by: Zaidhaan Hussain
---
 profiler/src/RemoteryProfilerImpl.hh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/profiler/src/RemoteryProfilerImpl.hh b/profiler/src/RemoteryProfilerImpl.hh
index 90acd261f..bbb4b89bb 100644
--- a/profiler/src/RemoteryProfilerImpl.hh
+++ b/profiler/src/RemoteryProfilerImpl.hh
@@ -19,6 +19,7 @@
 #define GZ_COMMON_REMOTERYPROFILERIMPL_HH_
 #include
+#include <cstdint>
 #include "RemoteryConfig.h"
 #include "Remotery.h"

From c9c200308d79cc685b93e945ac0782514b807b1c Mon Sep 17 00:00:00 2001
From: Angelo Elias Dal Zotto
Date: Wed, 31 May 2023 10:14:26 -0300
Subject: [PATCH 02/37] Include cstdint to build with GCC 13 (#517)

Signed-off-by: Angelo Elias Dal Zotto
---
 profiler/src/ProfilerImpl.hh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/profiler/src/ProfilerImpl.hh b/profiler/src/ProfilerImpl.hh
index 82d3ddad2..874d11fca 100644
--- a/profiler/src/ProfilerImpl.hh
+++ b/profiler/src/ProfilerImpl.hh
@@ -19,6 +19,7 @@
 #define GZ_COMMON_PROFILERIMPL_HH_
 #include
+#include <cstdint>
 namespace ignition
 {

From 1d3045de3d493a8cf77616b1e4c8983bb891d698 Mon Sep 17 00:00:00 2001
From: shameekganguly
Date: Thu, 1 Jun 2023 01:26:02 -0700
Subject: [PATCH 03/37] Add missing header includes (#518)

Signed-off-by: Shameek Ganguly
---
 graphics/src/Image_TEST.cc | 1 +
 test/integration/mesh.cc   | 1 +
 2 files changed, 2 insertions(+)

diff --git a/graphics/src/Image_TEST.cc b/graphics/src/Image_TEST.cc
index 637c73205..cc576e7ce 100644
--- a/graphics/src/Image_TEST.cc
+++ b/graphics/src/Image_TEST.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  *
  */
+#include
 #include
diff --git a/test/integration/mesh.cc b/test/integration/mesh.cc
index 5abf30595..eca02ea64 100644
--- a/test/integration/mesh.cc
+++ b/test/integration/mesh.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  *
  */
+#include
 #include

From 11e94807ab35119716e336e14f7a1f2c70aed835 Mon Sep 17 00:00:00 2001
From: Michael Carroll
Date: Tue, 6 Jun 2023 13:36:10 -0500
Subject: [PATCH 04/37] =?UTF-8?q?=F0=9F=8E=88=203.16.0=20(#519)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Michael Carroll
---
 CMakeLists.txt |  2 +-
 Changelog.md   | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3a0c067b8..a3f1d19c7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.10.2 FATAL_ERROR)
 #============================================================================
 # Initialize the project
 #============================================================================
-project(ignition-common3 VERSION 3.15.1)
+project(ignition-common3 VERSION 3.16.0)

 #============================================================================
 # Find ignition-cmake
diff --git a/Changelog.md b/Changelog.md
index 811827086..dfd6c1b24 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -1,5 +1,40 @@
 ## Gazebo Common 3.x

+## Gazebo Common 3.16.0 (2023-06-05)
+
+1. Include cstdint to build with GCC 13
+    * [Pull request #517](https://github.com/gazebosim/gz-common/pull/517)
+
+1. Fix missing cstdint header in latest gcc build
+    * [Pull request #513](https://github.com/gazebosim/gz-common/pull/513)
+
+1.
Fix for ffmpeg v6 + * [Pull request #497](https://github.com/gazebosim/gz-common/pull/497) + +1. Include cstring for memcpy + * [Pull request #501](https://github.com/gazebosim/gz-common/pull/501) + +1. Fixed MeshManager Singleton + * [Pull request #451](https://github.com/gazebosim/gz-common/pull/451) + +1. Rename COPYING to LICENSE + * [Pull request #494](https://github.com/gazebosim/gz-common/pull/494) + +1. Add marcoag as codeowner + * [Pull request #493](https://github.com/gazebosim/gz-common/pull/493) + +1. CI workflow: use checkout v3 + * [Pull request #490](https://github.com/gazebosim/gz-common/pull/490) + +1. Improved coverage remotery + * [Pull request #467](https://github.com/gazebosim/gz-common/pull/467) + +1. Added BVH and STL loader tests + * [Pull request #466](https://github.com/gazebosim/gz-common/pull/466) + +1. Increased Image coverage + * [Pull request #465](https://github.com/gazebosim/gz-common/pull/465) + ## Gazebo Common 3.15.1 (2022-10-11) 1. Fix build on case-insensitive filesystems From 6a0f9d7cd67a3916a0e6661cd99494f2d0e4faf9 Mon Sep 17 00:00:00 2001 From: mosfet80 Date: Tue, 20 Jun 2023 17:50:22 +0200 Subject: [PATCH 05/37] Fix compiling under linux (#521) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: mosfet80 Co-authored-by: Alejandro Hernández Cordero --- profiler/src/RemoteryProfilerImpl.hh | 1 + 1 file changed, 1 insertion(+) diff --git a/profiler/src/RemoteryProfilerImpl.hh b/profiler/src/RemoteryProfilerImpl.hh index bc262be5b..a3b574b4f 100644 --- a/profiler/src/RemoteryProfilerImpl.hh +++ b/profiler/src/RemoteryProfilerImpl.hh @@ -19,6 +19,7 @@ #define GZ_COMMON_REMOTERYPROFILERIMPL_HH_ #include +#include #include "RemoteryConfig.h" #include "Remotery.h" From 5f240a5f917bcfac07f632cafd4bc744b98f1231 Mon Sep 17 00:00:00 2001 From: Michael Carroll Date: Wed, 12 Jul 2023 15:47:37 -0500 Subject: [PATCH 06/37] Only build integration tests if libraries exist (#523) Signed-off-by: Michael Carroll --- test/integration/CMakeLists.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/integration/CMakeLists.txt b/test/integration/CMakeLists.txt index a863807d2..ae5215847 100644 --- a/test/integration/CMakeLists.txt +++ b/test/integration/CMakeLists.txt @@ -5,6 +5,10 @@ if (SKIP_av OR INTERNAL_SKIP_av) list(REMOVE_ITEM tests video_encoder.cc) endif() +if (SKIP_graphics OR INTERNAL_SKIP_graphics) + list(REMOVE_ITEM tests mesh.cc) +endif() + gz_build_tests( TYPE INTEGRATION SOURCES ${tests} @@ -19,10 +23,12 @@ if(TARGET INTEGRATION_plugin) target_include_directories(INTEGRATION_plugin PRIVATE ${PROJECT_SOURCE_DIR}/test) endif() +# Graphics specific integration tests if(TARGET INTEGRATION_mesh) target_link_libraries(INTEGRATION_mesh ${PROJECT_LIBRARY_TARGET_NAME}-graphics) endif() +# AV specific integration tests if(TARGET INTEGRATION_encoder_timing) target_link_libraries(INTEGRATION_encoder_timing ${PROJECT_LIBRARY_TARGET_NAME}-av) endif() From 15ef98538811881d5c3f5e9fa20ad1b77a28af3d Mon Sep 17 00:00:00 2001 From: Michael Carroll Date: Mon, 17 Jul 2023 14:39:17 -0700 Subject: [PATCH 07/37] Fix Github Actions on macOS (#524) Signed-off-by: Michael Carroll --- .github/workflows/macos.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index e544a8c48..54e002fd5 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -14,6 +14,15 @@ jobs: uses: Homebrew/actions/setup-homebrew@master - run: brew config + # 
Workaround for https://github.com/actions/setup-python/issues/577
+      - name: Clean up python binaries
+        run: |
+          rm -f /usr/local/bin/2to3*;
+          rm -f /usr/local/bin/idle3*;
+          rm -f /usr/local/bin/pydoc3*;
+          rm -f /usr/local/bin/python3*;
+          rm -f /usr/local/bin/python3*-config;
+
       - name: Install base dependencies
         run: |
           brew tap osrf/simulation;

From 3d6197bdd26d8d5bb313984209b1f34ab44e3bc1 Mon Sep 17 00:00:00 2001
From: "Addisu Z. Taddese"
Date: Tue, 1 Aug 2023 15:56:21 -0500
Subject: [PATCH 08/37] Fix Github project automation for new project board (#526)

Signed-off-by: Addisu Z. Taddese
---
 .github/workflows/triage.yml | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml
index 736670e0e..152404bee 100644
--- a/.github/workflows/triage.yml
+++ b/.github/workflows/triage.yml
@@ -1,7 +1,7 @@
 on:
   issues:
     types: [opened]
-  pull_request_target:
+  pull_request:
     types: [opened]
 name: Ticket opened
 jobs:
@@ -10,10 +10,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Add ticket to inbox
-        uses: technote-space/create-project-card-action@v1
+        uses: actions/add-to-project@v0.5.0
         with:
-          PROJECT: Core development
-          COLUMN: Inbox
-          GITHUB_TOKEN: ${{ secrets.TRIAGE_TOKEN }}
-          CHECK_ORG_PROJECT: true
+          project-url: https://github.com/orgs/gazebosim/projects/7
+          github-token: ${{ secrets.TRIAGE_TOKEN }}

From e282a8d3e89efeef0e788baa5dbd7e4459fed574 Mon Sep 17 00:00:00 2001
From: "Addisu Z. Taddese"
Date: Wed, 2 Aug 2023 15:48:11 -0500
Subject: [PATCH 09/37] Use `pull_request_target` for triage workflow (#527)

Signed-off-by: Addisu Z. Taddese
---
 .github/workflows/triage.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml
index 152404bee..2c94852da 100644
--- a/.github/workflows/triage.yml
+++ b/.github/workflows/triage.yml
@@ -1,7 +1,7 @@
 on:
   issues:
     types: [opened]
-  pull_request:
+  pull_request_target:
     types: [opened]
 name: Ticket opened
 jobs:

From 1243852c4bd8525ffc760a620e7d97f94cc2375c Mon Sep 17 00:00:00 2001
From: Jose Luis Rivero
Date: Thu, 10 Aug 2023 17:44:13 +0200
Subject: [PATCH 10/37] Missing cstdint header to use uint32_t with gcc-13 (#528)

Signed-off-by: Jose Luis Rivero
---
 profiler/src/ProfilerImpl.hh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/profiler/src/ProfilerImpl.hh b/profiler/src/ProfilerImpl.hh
index 82d3ddad2..02d4a3769 100644
--- a/profiler/src/ProfilerImpl.hh
+++ b/profiler/src/ProfilerImpl.hh
@@ -18,6 +18,7 @@
 #ifndef GZ_COMMON_PROFILERIMPL_HH_
 #define GZ_COMMON_PROFILERIMPL_HH_
+#include <cstdint>
 #include
 namespace ignition

From 83f87583f9b5bc8902bb89115ef973534aa46688 Mon Sep 17 00:00:00 2001
From: Michael Carroll
Date: Mon, 21 Aug 2023 19:28:22 -0500
Subject: [PATCH 11/37] =?UTF-8?q?=F0=9F=8E=88=205.4.1=20release=20(#531)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Michael Carroll
---
 CMakeLists.txt |  2 +-
 Changelog.md   | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index f48a733dd..b13ff3b98 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.10.2 FATAL_ERROR)
 #============================================================================
 # Initialize the project
 #============================================================================
-project(gz-common5 VERSION 5.4.0)
+project(gz-common5 VERSION 5.4.1)
 set(GZ_COMMON_VER
${PROJECT_VERSION_MAJOR}) #============================================================================ diff --git a/Changelog.md b/Changelog.md index e38588937..c1fa7779b 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,5 +1,40 @@ ## Gazebo Common 5.x +## Gazebo Common 5.4.1 (2023-08-21) + +1. Use `pull_request_target` for triage workflow + * [Pull request #527](https://github.com/gazebosim/gz-common/pull/527) + +1. Fix Github project automation for new project board + * [Pull request #526](https://github.com/gazebosim/gz-common/pull/526) + +1. Fix Github Actions on macOS + * [Pull request #524](https://github.com/gazebosim/gz-common/pull/524) + +1. Only build integration tests if libraries exist + * [Pull request #523](https://github.com/gazebosim/gz-common/pull/523) + +1. Fix compiling under linux + * [Pull request #521](https://github.com/gazebosim/gz-common/pull/521) + +1. Add missing header includes + * [Pull request #518](https://github.com/gazebosim/gz-common/pull/518) + +1. Header was dropped in the forward port and breaks downstream + * [Pull request #515](https://github.com/gazebosim/gz-common/pull/515) + +1. Port: 4 to 5 + * [Pull request #511](https://github.com/gazebosim/gz-common/pull/511) + +1. Lint + * [Pull request #Lint](https://github.com/gazebosim/gz-common/pull/Lint) + +1. 🎈 4.7.0 + * [Pull request #510](https://github.com/gazebosim/gz-common/pull/510) + +1. Fix build error when using gz:: with ign-common4 + * [Pull request #489](https://github.com/gazebosim/gz-common/pull/489) + ## Gazebo Common 5.4.0 (2023-04-28) 1. Add support for bayer images to be saved in a directory From 6e2444ff97616f68a2bcfd97b4cd2acec880b93b Mon Sep 17 00:00:00 2001 From: Michael Carroll Date: Mon, 28 Aug 2023 09:04:28 -0500 Subject: [PATCH 12/37] Build examples from CMake rather than executable (#502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Michael Carroll Signed-off-by: Michael Carroll Signed-off-by: Michael Carroll Co-authored-by: Alejandro Hernández Cordero Co-authored-by: Ian Chen --- .github/workflows/macos.yml | 10 ++++++---- CMakeLists.txt | 11 ++++++++++- examples/CMakeLists.txt | 4 +++- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 54e002fd5..de4fd5d0a 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -48,12 +48,14 @@ jobs: run: cmake .. 
-DCMAKE_INSTALL_PREFIX=/usr/local/Cellar/${PACKAGE}/HEAD - run: make working-directory: build - - run: make test - working-directory: build - env: - CTEST_OUTPUT_ON_FAILURE: 1 + # Run make install before make test so that the package will be available to + # build examples as part of the test - name: make install working-directory: build run: | make install; brew link ${PACKAGE}; + - run: make test + working-directory: build + env: + CTEST_OUTPUT_ON_FAILURE: 1 diff --git a/CMakeLists.txt b/CMakeLists.txt index b13ff3b98..ad6213748 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -144,7 +144,6 @@ gz_configure_build(QUIT_IF_BUILD_ERRORS #============================================================================ gz_create_packages() - #============================================================================ # Create documentation #============================================================================ @@ -157,3 +156,13 @@ gz_create_docs( TAGFILES "${GZ-MATH_DOXYGEN_TAGFILE} = ${GZ-MATH_API_URL}" ) + +#============================================================================ +# Build examples +#============================================================================ +if (BUILD_TESTING) + gz_build_examples( + SOURCE_DIR ${PROJECT_SOURCE_DIR}/examples + BINARY_DIR ${PROJECT_BINARY_DIR}/examples + ) +endif() diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 22f20252c..3908f3878 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,4 +1,6 @@ -cmake_minimum_required(VERSION 2.8 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) + +project(gz-common-examples) # Find the gz-common library find_package(gz-common5 QUIET REQUIRED COMPONENTS events profiler) From e18906b178c6dc3b2af5b418d90ea08bd571f540 Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Wed, 30 Aug 2023 20:01:22 -0700 Subject: [PATCH 13/37] Fix glTF metalness and roughness map orientation (#532) Signed-off-by: Ian Chen --- graphics/src/AssimpLoader.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/graphics/src/AssimpLoader.cc b/graphics/src/AssimpLoader.cc index f38da665d..eeac1bee3 100644 --- a/graphics/src/AssimpLoader.cc +++ b/graphics/src/AssimpLoader.cc @@ -507,13 +507,13 @@ std::pair std::vector metalnessData(width * height * bytesPerPixel); std::vector roughnessData(width * height * bytesPerPixel); - for (unsigned int x = 0; x < width; ++x) + for (unsigned int y = 0; y < height; ++y) { - for (unsigned int y = 0; y < height; ++y) + for (unsigned int x = 0; x < width; ++x) { // RGBA so 4 bytes per pixel, alpha fully opaque - auto baseIndex = bytesPerPixel * (x * height + y); - auto color = _img.Pixel(x, y); + auto baseIndex = bytesPerPixel * (y * width + x); + auto color = _img.Pixel(x, (height - y - 1)); metalnessData[baseIndex] = color.B() * 255.0; metalnessData[baseIndex + 1] = color.B() * 255.0; metalnessData[baseIndex + 2] = color.B() * 255.0; From 038bf79cda4d66065431212ad175ec220fda6893 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alejandro=20Hern=C3=A1ndez=20Cordero?= Date: Tue, 5 Sep 2023 21:51:01 +0200 Subject: [PATCH 14/37] Update profiler.md (#535) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Alejandro Hernández Cordero --- tutorials/profiler.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorials/profiler.md b/tutorials/profiler.md index 22808d649..b0c07a11f 100644 --- a/tutorials/profiler.md +++ b/tutorials/profiler.md @@ -125,8 +125,8 @@ colcon 
build --cmake-args -DENABLE_PROFILER=1 ``` Run your Gazebo library then go to your Gazebo installation path and open the profiler browser using: -``` -libexec/gz/gz-common/gz_remotery_vis +```{.sh} +/install/libexec/gz/gz-common/gz_remotery_vis ``` If the profiler is run successfully, you should see output in a browser. Similar to this From 33b6175fde4e2fad25e3b734f3ede3adbffacab7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alejandro=20Hern=C3=A1ndez=20Cordero?= Date: Tue, 5 Sep 2023 22:09:37 +0200 Subject: [PATCH 15/37] Update README.md (#534) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Alejandro Hernández Cordero --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a634002bf..445034321 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ callback system. # Install -See the [installation tutorial](https://gazebosim.org/api/common/4.0/install.html). +See the [installation tutorial](https://gazebosim.org/api/common/5/install.html). # Usage @@ -69,8 +69,10 @@ Refer to the following table for information about important directories and fil +-- av Header and source files for the AV component. +-- events Header and source files for the Event component. +-- examples Example programs. ++-- geospatial Geospatial component for heightmap & DEMs +-- graphics Header and source files for the Graphics component. -+-- include/gz/common Header files for the core component. ++-- include/gz/common Header files for the core component. ++-- io IO component. Read object from stream. +-- profiler Header and source files for the Profiler component. +-- src Core source files and unit tests. +-- test From 0e866d8d2663096b98516c1de17c133f0cbc7253 Mon Sep 17 00:00:00 2001 From: "Addisu Z. Taddese" Date: Tue, 26 Sep 2023 14:49:25 -0500 Subject: [PATCH 16/37] Prepare for 5.4.2 release (#536) Signed-off-by: Addisu Z. Taddese --- CMakeLists.txt | 2 +- Changelog.md | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ad6213748..09f53ebbd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.10.2 FATAL_ERROR) #============================================================================ # Initialize the project #============================================================================ -project(gz-common5 VERSION 5.4.1) +project(gz-common5 VERSION 5.4.2) set(GZ_COMMON_VER ${PROJECT_VERSION_MAJOR}) #============================================================================ diff --git a/Changelog.md b/Changelog.md index c1fa7779b..976e01331 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,5 +1,18 @@ ## Gazebo Common 5.x +## Gazebo Common 5.4.2 (2023-09-26) + +1. Documentation fixes + * [Pull request #534](https://github.com/gazebosim/gz-common/pull/534) + * [Pull request #535](https://github.com/gazebosim/gz-common/pull/535) + +1. Fix glTF metalness and roughness map orientation + * [Pull request #532](https://github.com/gazebosim/gz-common/pull/532) + +1. Build examples from CMake rather than executable + * [Pull request #502](https://github.com/gazebosim/gz-common/pull/502) + + ## Gazebo Common 5.4.1 (2023-08-21) 1. 
Use `pull_request_target` for triage workflow From 7838ced4b8547be15ddeaa76fbf3ff6d900a7b72 Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Thu, 5 Oct 2023 09:50:31 -0700 Subject: [PATCH 17/37] Prevent loading lightmaps if mesh is a glb file that has an occlusion-metallic-roughness texture (#538) * Prevent loading lightmaps if mesh is a glb file --------- Signed-off-by: Ian Chen --- graphics/src/AssimpLoader.cc | 24 +++++++++++++++--------- graphics/src/AssimpLoader_TEST.cc | 10 ++++++++-- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/graphics/src/AssimpLoader.cc b/graphics/src/AssimpLoader.cc index eeac1bee3..ba2c57655 100644 --- a/graphics/src/AssimpLoader.cc +++ b/graphics/src/AssimpLoader.cc @@ -425,6 +425,21 @@ MaterialPtr AssimpLoader::Implementation::CreateMaterial( this->GenerateTextureName(_scene, assimpMat, "Roughness")); pbr.SetRoughnessMap(texName, texData); } + // Load lightmap only if it is not a glb/glTF mesh that contains a + // MetallicRoughness texture + // It was found that lightmap field just stores the entire MetallicRoughness + // texture. Issues were also reported in assimp: + // https://github.com/assimp/assimp/issues/3120 + // https://github.com/assimp/assimp/issues/4637 + unsigned int uvIdx = 0; + ret = assimpMat->GetTexture( + aiTextureType_LIGHTMAP, 0, &texturePath, NULL, &uvIdx); + if (ret == AI_SUCCESS) + { + auto [texName, texData] = this->LoadTexture(_scene, texturePath, + this->GenerateTextureName(_scene, assimpMat, "Lightmap")); + pbr.SetLightMap(texName, uvIdx, texData); + } } #endif ret = assimpMat->GetTexture(aiTextureType_NORMALS, 0, &texturePath); @@ -442,15 +457,6 @@ MaterialPtr AssimpLoader::Implementation::CreateMaterial( this->GenerateTextureName(_scene, assimpMat, "Emissive")); pbr.SetEmissiveMap(texName, texData); } - unsigned int uvIdx = 0; - ret = assimpMat->GetTexture( - aiTextureType_LIGHTMAP, 0, &texturePath, NULL, &uvIdx); - if (ret == AI_SUCCESS) - { - auto [texName, texData] = this->LoadTexture(_scene, texturePath, - this->GenerateTextureName(_scene, assimpMat, "Lightmap")); - pbr.SetLightMap(texName, uvIdx, texData); - } #ifndef GZ_ASSIMP_PRE_5_2_0 float value; ret = assimpMat->Get(AI_MATKEY_METALLIC_FACTOR, value); diff --git a/graphics/src/AssimpLoader_TEST.cc b/graphics/src/AssimpLoader_TEST.cc index 87e6b8210..f171d427e 100644 --- a/graphics/src/AssimpLoader_TEST.cc +++ b/graphics/src/AssimpLoader_TEST.cc @@ -708,10 +708,16 @@ TEST_F(AssimpLoader, LoadGlbPbrAsset) // Check pixel values to test metallicroughness texture splitting EXPECT_FLOAT_EQ(pbr->MetalnessMapData()->Pixel(256, 256).R(), 0.0); EXPECT_FLOAT_EQ(pbr->RoughnessMapData()->Pixel(256, 256).R(), 124.0 / 255.0); + // Bug in assimp 5.0.x that doesn't parse coordinate sets properly - EXPECT_EQ(pbr->LightMapTexCoordSet(), 1); + // \todo(iche033) Lightmaps are disabled for glb meshes + // due to upstream bug + // EXPECT_EQ(pbr->LightMapTexCoordSet(), 1); #endif - EXPECT_NE(pbr->LightMapData(), nullptr); + + // \todo(iche033) Lightmaps are disabled for glb meshes + // due to upstream bug + // EXPECT_NE(pbr->LightMapData(), nullptr); // Mesh has 3 animations auto skel = mesh->MeshSkeleton(); From d9d1722d3251ded4c716a1d30a146d17eca4534c Mon Sep 17 00:00:00 2001 From: Steve Peters Date: Sat, 7 Oct 2023 22:31:37 -0700 Subject: [PATCH 18/37] EnumIface: suppress deprecation warning (#540) The EnumIterator inherits from std::iterator, which has been deprecated in c++17. There is a fix in gz-common6 but it breaks API, so it can't be merged to Garden. 
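For reference, the usual C++17-safe replacement for deriving from std::iterator is to declare the iterator's five member types directly. The sketch below is generic C++ and is not taken from the gz-common6 patch mentioned above; the class body and names are illustrative only.

    #include <cstddef>
    #include <iterator>

    // Illustrative only: supplies the same member types that
    // std::iterator<std::forward_iterator_tag, Enum> used to provide,
    // without inheriting from the deprecated base class.
    template <typename Enum>
    class EnumIterator
    {
      public: using iterator_category = std::forward_iterator_tag;
      public: using value_type = Enum;
      public: using difference_type = std::ptrdiff_t;
      public: using pointer = Enum *;
      public: using reference = Enum &;

      // ...increment, dereference, and comparison operators as before...
    };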
So just suppress the deprecation warning in Garden. Signed-off-by: Steve Peters --- include/gz/common/EnumIface.hh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/gz/common/EnumIface.hh b/include/gz/common/EnumIface.hh index 2b0fe1dd8..83a03e22a 100644 --- a/include/gz/common/EnumIface.hh +++ b/include/gz/common/EnumIface.hh @@ -141,6 +141,7 @@ namespace gz /// std::cout << "Type++ Name[" << myTypeIface.Str(*i) << "]\n"; /// } /// \verbatim + GZ_UTILS_WARN_IGNORE__DEPRECATED_DECLARATION template class EnumIterator : std::iterator @@ -218,6 +219,7 @@ namespace gz /// member value ever used. private: Enum c; }; + GZ_UTILS_WARN_RESUME__DEPRECATED_DECLARATION /// \brief Equality operator /// \param[in] _e1 First iterator From afc1ab06303396581b740f8e114f9033506b9513 Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Mon, 23 Oct 2023 09:28:38 -0700 Subject: [PATCH 19/37] Fix glTF / glb root node transform (#543) Signed-off-by: Ian Chen --- graphics/src/AssimpLoader.cc | 50 ++++++++++++++---- graphics/src/AssimpLoader_TEST.cc | 43 +++++++++++++++ .../multiple_texture_coordinates_triangle.glb | Bin 0 -> 2132 bytes 3 files changed, 84 insertions(+), 9 deletions(-) create mode 100644 test/data/multiple_texture_coordinates_triangle.glb diff --git a/graphics/src/AssimpLoader.cc b/graphics/src/AssimpLoader.cc index ba2c57655..79f513b41 100644 --- a/graphics/src/AssimpLoader.cc +++ b/graphics/src/AssimpLoader.cc @@ -155,6 +155,15 @@ class AssimpLoader::Implementation /// calculated from the "old" parent model transform. /// \param[in] _skeleton the skeleton to work on public: void ApplyInvBindTransform(SkeletonPtr _skeleton) const; + + /// Get the updated root node transform. The function updates the original + /// transform by setting the rotation to identity if requested. + /// \param[in] _scene Scene with axes info stored in meta data + /// \param[in] _useIdentityRotation Whether to set rotation to identity. + /// Note: This is currently set to false for glTF / glb meshes. 
+ /// \return Updated transform + public: aiMatrix4x4 UpdatedRootNodeTransform(const aiScene *_scene, + bool _useIdentityRotation = true); }; ////////////////////////////////////////////////// @@ -645,16 +654,15 @@ Mesh *AssimpLoader::Load(const std::string &_filename) } auto& rootNode = scene->mRootNode; auto rootName = ToString(rootNode->mName); - auto transform = scene->mRootNode->mTransformation; - aiVector3D rootScaling, rootAxis, rootPos; - float angle; - transform.Decompose(rootScaling, rootAxis, angle, rootPos); - // drop rotation, but keep scaling and position - // TODO(luca) it seems imported assets are rotated by 90 degrees - // as documented here https://github.com/assimp/assimp/issues/849 - // remove workaround when fixed - transform = aiMatrix4x4(rootScaling, aiQuaternion(), rootPos); + // compute assimp root node transform + std::string extension = _filename.substr(_filename.rfind(".") + 1, + _filename.size()); + std::transform(extension.begin(), extension.end(), + extension.begin(), ::tolower); + bool useIdentityRotation = (extension != "glb" && extension != "glTF"); + auto transform = this->dataPtr->UpdatedRootNodeTransform(scene, + useIdentityRotation); auto rootTransform = this->dataPtr->ConvertTransform(transform); // Add the materials first @@ -750,5 +758,29 @@ void AssimpLoader::Implementation::ApplyInvBindTransform( } } +///////////////////////////////////////////////// +aiMatrix4x4 AssimpLoader::Implementation::UpdatedRootNodeTransform( + const aiScene *_scene, bool _useIdentityRotation) +{ + // Some assets apear to be rotated by 90 degrees as documented here + // https://github.com/assimp/assimp/issues/849. + auto transform = _scene->mRootNode->mTransformation; + if (_useIdentityRotation) + { + // drop rotation, but keep scaling and position + aiVector3D rootScaling, rootAxis, rootPos; + float angle; + transform.Decompose(rootScaling, rootAxis, angle, rootPos); + transform = aiMatrix4x4(rootScaling, aiQuaternion(), rootPos); + } + // for glTF / glb meshes, it was found that the transform is needed to + // produce a result that is consistent with other engines / glTF viewers. 
+ else + { + transform = _scene->mRootNode->mTransformation; + } + return transform; +} + } } diff --git a/graphics/src/AssimpLoader_TEST.cc b/graphics/src/AssimpLoader_TEST.cc index f171d427e..fffd50f45 100644 --- a/graphics/src/AssimpLoader_TEST.cc +++ b/graphics/src/AssimpLoader_TEST.cc @@ -744,3 +744,46 @@ TEST_F(AssimpLoader, CheckNonRootDisplacement) auto xDisplacement = skelAnim->XDisplacement(); ASSERT_TRUE(xDisplacement); } + +///////////////////////////////////////////////// +TEST_F(AssimpLoader, LoadGLTF2Triangle) +{ + common::AssimpLoader loader; + common::Mesh *mesh = loader.Load( + common::testing::TestFile("data", + "multiple_texture_coordinates_triangle.glb")); + ASSERT_TRUE(mesh); + + EXPECT_EQ(6u, mesh->VertexCount()); + EXPECT_EQ(6u, mesh->NormalCount()); + EXPECT_EQ(6u, mesh->IndexCount()); + EXPECT_EQ(6u, mesh->TexCoordCount()); + EXPECT_EQ(2u, mesh->SubMeshCount()); + EXPECT_EQ(1u, mesh->MaterialCount()); + + auto sm = mesh->SubMeshByIndex(0u); + auto subMesh = sm.lock(); + EXPECT_NE(nullptr, subMesh); + EXPECT_EQ(math::Vector3d(0, 0, 0), subMesh->Vertex(0u)); + EXPECT_EQ(math::Vector3d(10, 0, 0), subMesh->Vertex(1u)); + EXPECT_EQ(math::Vector3d(10, 10, 0), subMesh->Vertex(2u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), subMesh->Normal(0u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), subMesh->Normal(1u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), subMesh->Normal(2u)); + EXPECT_EQ(math::Vector2d(0, 1), subMesh->TexCoord(0u)); + EXPECT_EQ(math::Vector2d(0, 1), subMesh->TexCoord(1u)); + EXPECT_EQ(math::Vector2d(0, 1), subMesh->TexCoord(2u)); + + auto smb = mesh->SubMeshByIndex(1u); + auto subMeshB = smb.lock(); + EXPECT_NE(nullptr, subMeshB); + EXPECT_EQ(math::Vector3d(10, 0, 0), subMeshB->Vertex(0u)); + EXPECT_EQ(math::Vector3d(20, 0, 0), subMeshB->Vertex(1u)); + EXPECT_EQ(math::Vector3d(20, 10, 0), subMeshB->Vertex(2u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), subMeshB->Normal(0u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), subMeshB->Normal(1u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), subMeshB->Normal(2u)); + EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoord(0u)); + EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoord(1u)); + EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoord(2u)); +} diff --git a/test/data/multiple_texture_coordinates_triangle.glb b/test/data/multiple_texture_coordinates_triangle.glb new file mode 100644 index 0000000000000000000000000000000000000000..547c93e3cab7e69fd2049405800cda33672b7b34 GIT binary patch literal 2132 zcmb_d!EV|>5Z(0B9((Q0no|!hwbulS4~+;-BwC6qu%b#4sthcSCA>!VLK`8Z{s#Fb z`cIu*aBvL*htwSG+3}nA_RX;N$L{qDLWui{khd=h`P_BdbMTUc39#qjGtOd9L^0U# z}Prv&WVIdCt; zHR!6G4m=t%nJ{#-x9Dx-IfN{^Q{W9k#DT~CF~iDf(!}@I<0)f-f*1G zrymB`G!vR&Tj)T*hrvKrC@*?a_CB=*dCzr7c`F$r^Yxk-_45KTGGgr!61{la#ksnS=h6$aRGY~MN( z3IjJ+4NLVYTN~{a=-s^%Q* z*dbSOc8)09qIX@zQF{iu_B1rk+IE_VWB57c19z0lHma)yx5nA znpLh_ctZ0n3GxnaewSew?+Sv7az7*T+x_pA<@=xfMCo3wRv#tb!*Z$ae^xEEZdsPPKP=x|`~@r8SV8~* literal 0 HcmV?d00001 From 3742a5a5dede67953beefc963196d517cae202e8 Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Wed, 25 Oct 2023 07:38:45 -0700 Subject: [PATCH 20/37] Support loading glb with compressed jpeg textures (#545) Signed-off-by: Ian Chen --- graphics/include/gz/common/Image.hh | 6 ++++- graphics/src/AssimpLoader.cc | 25 +++++++++++++++++---- graphics/src/AssimpLoader_TEST.cc | 33 ++++++++++++++++++++++++++++ graphics/src/Image.cc | 16 ++++++++++++-- test/data/box_texture_jpg.glb | Bin 0 -> 7612 bytes 5 files changed, 73 insertions(+), 7 deletions(-) create mode 100644 test/data/box_texture_jpg.glb diff --git 
a/graphics/include/gz/common/Image.hh b/graphics/include/gz/common/Image.hh index 4a30eb4ce..42196def8 100644 --- a/graphics/include/gz/common/Image.hh +++ b/graphics/include/gz/common/Image.hh @@ -85,7 +85,11 @@ namespace gz BAYER_GBRG8, BAYER_GRBG8, COMPRESSED_PNG, - PIXEL_FORMAT_COUNT + PIXEL_FORMAT_COUNT, + // \todo(iche033) COMPRESSED_JPEG is added at the end to + // preserve ABI compatibility. Move this enum up when merging + // forward to main + COMPRESSED_JPEG }; diff --git a/graphics/src/AssimpLoader.cc b/graphics/src/AssimpLoader.cc index 79f513b41..25d2e9ed6 100644 --- a/graphics/src/AssimpLoader.cc +++ b/graphics/src/AssimpLoader.cc @@ -551,16 +551,33 @@ std::pair ImagePtr AssimpLoader::Implementation::LoadEmbeddedTexture( const aiTexture* _texture) const { - auto img = std::make_shared(); if (_texture->mHeight == 0) { + Image::PixelFormatType format = Image::PixelFormatType::UNKNOWN_PIXEL_FORMAT; if (_texture->CheckFormat("png")) { - img->SetFromCompressedData((unsigned char*)_texture->pcData, - _texture->mWidth, Image::PixelFormatType::COMPRESSED_PNG); + format = Image::PixelFormatType::COMPRESSED_PNG; + } + else if (_texture->CheckFormat("jpg")) + { + format = Image::PixelFormatType::COMPRESSED_JPEG; + } + if (format != Image::PixelFormatType::UNKNOWN_PIXEL_FORMAT) + { + auto img = std::make_shared(); + img->SetFromCompressedData( + reinterpret_cast(_texture->pcData), + _texture->mWidth, format); + return img; + } + else + { + gzerr << "Unable to load embedded texture. " + << "Unsupported compressed image format" + << std::endl; } } - return img; + return ImagePtr(); } ////////////////////////////////////////////////// diff --git a/graphics/src/AssimpLoader_TEST.cc b/graphics/src/AssimpLoader_TEST.cc index fffd50f45..5df30d0f1 100644 --- a/graphics/src/AssimpLoader_TEST.cc +++ b/graphics/src/AssimpLoader_TEST.cc @@ -644,6 +644,39 @@ TEST_F(AssimpLoader, LoadGlTF2BoxExternalTexture) EXPECT_EQ(testTextureFile, mat->TextureImage()); } +///////////////////////////////////////////////// +// This test loads a box glb mesh with embedded compressed jpeg texture +TEST_F(AssimpLoader, LoadGlTF2BoxWithJPEGTexture) +{ + common::AssimpLoader loader; + common::Mesh *mesh = loader.Load( + common::testing::TestFile("data", "box_texture_jpg.glb")); + + EXPECT_STREQ("unknown", mesh->Name().c_str()); + EXPECT_EQ(math::Vector3d(1, 1, 1), mesh->Max()); + EXPECT_EQ(math::Vector3d(-1, -1, -1), mesh->Min()); + + EXPECT_EQ(24u, mesh->VertexCount()); + EXPECT_EQ(24u, mesh->NormalCount()); + EXPECT_EQ(36u, mesh->IndexCount()); + EXPECT_EQ(24u, mesh->TexCoordCount()); + EXPECT_EQ(1u, mesh->SubMeshCount()); + EXPECT_EQ(1u, mesh->MaterialCount()); + + // Make sure we can read the submesh name + EXPECT_STREQ("Cube", mesh->SubMeshByIndex(0).lock()->Name().c_str()); + + const common::MaterialPtr mat = mesh->MaterialByIndex(0u); + ASSERT_TRUE(mat.get()); + + // Make sure we read the material color values + EXPECT_EQ(math::Color(0.4f, 0.4f, 0.4f, 1.0f), mat->Ambient()); + EXPECT_EQ(math::Color(1.0f, 1.0f, 1.0f, 1.0f), mat->Diffuse()); + EXPECT_EQ(math::Color(0.0f, 0.0f, 0.0f, 1.0f), mat->Specular()); + EXPECT_EQ("Cube_Material_Diffuse", mat->TextureImage()); + EXPECT_NE(nullptr, mat->TextureData()); +} + ///////////////////////////////////////////////// // Use a fully featured glb test asset, including PBR textures, emissive maps // embedded textures, lightmaps, animations to test advanced glb features diff --git a/graphics/src/Image.cc b/graphics/src/Image.cc index 2eb76154b..1bbdf74ff 100644 --- 
a/graphics/src/Image.cc +++ b/graphics/src/Image.cc @@ -274,10 +274,22 @@ void Image::SetFromCompressedData(unsigned char *_data, FreeImage_Unload(this->dataPtr->bitmap); this->dataPtr->bitmap = nullptr; - if (_format == COMPRESSED_PNG) + FREE_IMAGE_FORMAT format = FIF_UNKNOWN; + switch (_format) + { + case COMPRESSED_PNG: + format = FIF_PNG; + break; + case COMPRESSED_JPEG: + format = FIF_JPEG; + break; + default: + break; + } + if (format != FIF_UNKNOWN) { FIMEMORY *fiMem = FreeImage_OpenMemory(_data, _size); - this->dataPtr->bitmap = FreeImage_LoadFromMemory(FIF_PNG, fiMem); + this->dataPtr->bitmap = FreeImage_LoadFromMemory(format, fiMem); FreeImage_CloseMemory(fiMem); } else diff --git a/test/data/box_texture_jpg.glb b/test/data/box_texture_jpg.glb new file mode 100644 index 0000000000000000000000000000000000000000..6260273cd26e689e734b21350cb75e9a04447f27 GIT binary patch literal 7612 zcmeHJc|6qH`#;Rsm57RH7+h;&j5TYBansn9C3lR$*o`5})hOJ`wS=^|*Dhn1N|uyF zXc0w7i!7ChvS!K5_cNAmKX>{5?)~TYdi`GaaL)Oh=e(ciJn!du4#(ZwdOtG&piTsU zJU9TRR@j5#NL*kbfsE7)N4gVy2_ziZkA&1je(yo@^Ysf9b%#_$_jwb1T?r&n%pR<0 zu#$qBg0eDF9vMs^1rq&yAuvio5g8$m48%hQkeZ5XUx7$Xhj64X&IghqtycErk-mPe z1jdGBN5L~ zQT{C)43}3zNM7Z#8lFI$kH0qo${!;KenBKWBOMF}i9R^@H6L7p+}sExTO#2oQWFgs zB>E7nL;V>!S^@3x^e4DOL0k)~J1&gi;^*w`=kB+X4jdlxB+!qvdfvCTpd;~qKK_2b z1Yh#1K@}yGDnl6L3zZj&aTX2=wS$s8^ux$35vm=B-AeK+zmR~u>V~b6fihH1P`1NxRLq594JZ-cdx8<$Wr;Q+zc zoeb4HN?lzZ$q-=O+*U3kREz7}|2r7c{tLL021NW@+bU|Re*st9fL2reE8{9EDo{?p zwXTj{DYmsLTxAUv-hQGtbcd0e8fqwwm9&2qXsD|(s&h3`Yf<_pG^)xPs!)}Se%S%Q zKFmSrFIS}Q-{|$NpZtza7v0DOQO8o2=v*aBdH6B;?e zcEAFd0XsCZ11`V|HUlPTWCCzN@1?&6TTJ(3_5&s+0EwW1{u~$pc2-t4Ru*0+^X#FlIQ6g#`{hZkQsVeE{cW;S*ERXWeRnV?ze;qmCzKvy1Pm zXb`Y`{7FLDg>-_0bKCYEf42||1n_x_>tZi)V4js0~yAs^o zJ&2xx?my}5dfMI7`>bzZaA^3=+jk@HM<=JIXJ+R<&o3-8crgLkDlKSSWwwDAFT{%( z4u`?n7`&L6k1~k!!db+WSo!oV*l+<`k*MSB{QHu!D;hY&l`TIBxR4%mZj(R{NKP`S ztuXuF5j*jZn0+Po8?QdV4P%0m2jhjReSsD$oyGib|2GEz?`lA1(r{X*wd-n77OPEh zIETCFW$6o#26fMr%54(7lrC=4H|X{nSw=ZHJF}@Km4;3tX$-cuTi#*EI!vFdi_UcxgiI@qT zgHK-6hc{+t!28(Ypmyelz}g7;624i*%<6tD_RUP0kVc7=oSDPoXqdoM*qKBXHOg4_ z;59q_A`g;-BSxTUj~_{+aWhYS=L=hjxhaRshqnuk7#kcZA{OVi1Ox=TT3Z+?vG7~y z{a^I$@*i$J;kt@rY7q%^syeA99#AZrr270>RFsruOYc#gw>9ybm84C2&+Phvx@*4N zZgP8L`_>T7K{I2M)Q{xH5460_+0a4$>Gu=5ruVNDynZU>=BNK8uCu5tzEsYr;_<-L zxERx^g*;!UThv9{m_Q*!+=oi-%P)N@YqfBRt?zp077^XGVGE|8iaF}NYG&ud&Qe)E z1anH~RH(^}YC2^U`{L@3yB=xDwi@N}oKEDONBN)xrB2_ydw$aQw(H{JvQe=8yPr$Q z@sIZ#-R&59U~ngMF42!_Kdl|uRHYf+@bR5Qkcs(^#WRSmp2~AinyBP)gl@;DY13-k z8vD90^;VfTFK;RTg?I1XsJ;T5pMR-QFnJl7n-g1pWMI0%NZ=B|^(S4OfEIHez+6JIkI$zIg__@3SDbnsw#BonukeMTG&|czHOifAvx5W2=6V+6D6(wX<|jDd zTa3+#ayW@7i$t$7k#P5<2|3gtYt@D37WI*UE_0-;bWWszP5OzVb5fTDFOfqYi5$i{ z^2T;_BKykyTVOi`XC^4^H+2?R+H$any`e1?W3*$}xbNr0N^dDD`5<6jp&i+VnIqD{ zySU8H_wpO%^krMK__RIy=St|{MdFn1kePNOUg zNi9ziOSua!rAe9ZO+}W{foJ=2D;4* zc|v(p^@VM*gJsWEEFEaM(!nh=rN=|+K?H68{jy(b&pv)QvQ0~F3yNpHYwojBzf6Kj z^K0dVtffR#R{Ud2?DmwsEsvOnbkd77;rzo1808AViXM+P`--be27Z)T&(P00sczw2 zl}(DsbL$g(*(V zKOor`jiQ5)$wfQNKqlL$=J;aYdsUK?BZWVGo6;!ONMK}Fz`c7uXp7SlW<>`z#22h> zo_KYWh#w;eGFFmx`BHKnsouij0gf-35!}bTM}8f+082D|{M1d%QnWw(E$&Nw`5Wt! 
ze!{S^-~;^hV998K;p6%pa)}Sp(269 z@YIxRuvC=Z!_XYXhmSLg!WKK_tgK@$$Tvzz_q}SYnl7n@^Jnqt!6aswlgr;%qxy^RIUuxN_=^&CY5T<1ki>+OuY)sXhBbXf~T1yL`GiY|h}E ztFnv8>^6uP$`D{sSPh6;`et+|@R9V;zEj3B+4)oCZC)bvBBCF* z$n;=)T&L3uitD`UD@^{G58K^P4_jy#sgy#*$*_fY2Wt}~l}v))T-jL}&UU9?WJG6M zYU!+*g8D0~#Buem8wJ`IgwNDZEcu6ZDCc~tZ+I|nJGSK9eBSa}vdG(>w%y{Hiyq6j zqA~-HJiz0K`G;GGcprhAb$AY8QvsvhspDn2M3I-NO?KO+xbu<}3AkASZDv9FiiWKZ z&_4@%a4FZ^Hs6~ZW_Kp!-O#3}yQ=X6V}~kbzT0Khjt(Du7blCjb*wJJYx|(ttE+>Q z(~S*}oflLc22F+}qF=O@9gbXTNlCmE<{oux=g2Xhc*0M*m7Y5m%D=Oyrc`T6F9wd9 z!L;fd>RHNX_nm(wz6bAl4Ci@at8UK~o}OO28lt97dUmhmxr;=B(%Ko5W>f@b6kGGQ z6sx6rS?e=jo_VvHR zae-&7es)=n7O&D``>4Edx#C@@q%wBz`J`oxhSa1=yq;od$gtVIuBLZN#Vd7h9bK%XEC*`KZJ{U^9<*6;x1> zTzj|fO*IxBN87uv zjK7_dI;=YJJVQC*>Cq(z+6BrG9V~mYS}bYN!E`frY4{1+5;+m_()d6svYZaQD4jfC zD0EP2?&KViFPd@Wg=*jpG<%@~s#qtvsOZqygP{Eq-ZnandY(@zCB2CCC}p;Qnfi=M z2N`^ne%7R{i~Ls&M{^5h^@{uZ`ez#H00CVw>!U)6nK0uwK94kBX3&8t$hEh}ez{Hu zkx=~FidD~32>nyOFB9go;29k85AzI%x2ua`OEK>5%8qiVv({o)8U^PH>nrQupQP#1 zL3hFM)9<{aZS+mr*v@M^rF_q2@zzYDqd%PDx`cp zt*CQ!>w!xlhT10sX0r?zQy&cti|s&!MD;(pT96Y7Js|&CukZ`qU2}VH$`W-k%qC14 zcJotW!mRTqr-eSF0UIe)-+g7_OdDEmX4vA+hD-spvjF?JJB~W)4*M`w^#J;JkGrwF znWy`fHoOqd$3Dz!uiBRjJH0RyB%D|Z)9yS(2Pr90v|GFBV8*m3FQjE~|H%Qk2O(uV zEJ4RX{g5x>V@9k_-ICBkw3pq-AJeFF(QU!8sxKCzt2u8NxAjCS&D%=8DrpzraW5Eu zE@=uQRqi~dCvrZHh8(5ITu3(^yI;ws^vvMSo_ym;MBt(wO8`Zl4x|^hcI>LRvblJ% k_o|XhcuJZn^8oa~5rv+S;NSjl4F0Ww|4{=l`ZECj1F+FOw*UYD literal 0 HcmV?d00001 From 983a1eb4bec2753c7c1313534c99d26474a3a9c4 Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Fri, 27 Oct 2023 19:49:34 -0700 Subject: [PATCH 21/37] ign -> gz (#547) Signed-off-by: Ian Chen --- graphics/src/AssimpLoader.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphics/src/AssimpLoader.cc b/graphics/src/AssimpLoader.cc index 25d2e9ed6..2eda12480 100644 --- a/graphics/src/AssimpLoader.cc +++ b/graphics/src/AssimpLoader.cc @@ -666,7 +666,7 @@ Mesh *AssimpLoader::Load(const std::string &_filename) 0); if (scene == nullptr) { - ignerr << "Unable to import mesh [" << _filename << "]" << std::endl; + gzerr << "Unable to import mesh [" << _filename << "]" << std::endl; return mesh; } auto& rootNode = scene->mRootNode; From 2de1bd90e9e0181185a6c6654e638dfb25a1eb24 Mon Sep 17 00:00:00 2001 From: jmachowinski Date: Mon, 13 Nov 2023 21:38:54 +0100 Subject: [PATCH 22/37] Fix segfault in case of no write access to log dir (#546) * fix: Don't recursively call init if log directories can not be created. If there is no permission to create the log directories, the FileLogger will call itself recutsive, as is used ignerr for logging the error, which will call FileLogger::Init again. This commit fixed this, by using std::cerr for the log output in FileLogger::Init. Signed-off-by: Janosch Machowinski Co-authored-by: Ian Chen --- include/gz/common/Filesystem.hh | 8 +++++++- src/Console.cc | 11 ++++++++--- src/EnumIface_TEST.cc | 2 +- src/Filesystem.cc | 10 ++++++++-- 4 files changed, 24 insertions(+), 7 deletions(-) diff --git a/include/gz/common/Filesystem.hh b/include/gz/common/Filesystem.hh index d1c2ea326..8541d80c5 100644 --- a/include/gz/common/Filesystem.hh +++ b/include/gz/common/Filesystem.hh @@ -63,7 +63,13 @@ namespace ignition /// \return True if directory creation was successful, false otherwise. 
bool IGNITION_COMMON_VISIBLE createDirectory(const std::string &_path); - /// \brief Create directories for the given path + /// \brief Create directories for the given path errors are printed to the given stream + /// \param[in] _path Path to create directories from + /// \param[in] _errorOut Stream for error output + /// \return true on success + bool IGNITION_COMMON_VISIBLE createDirectories(const std::string &_path, std::ostream &_errorOut); + + /// \brief Create directories for the given path errors are printed on ignerr /// \param[in] _path Path to create directories from /// \return true on success bool IGNITION_COMMON_VISIBLE createDirectories(const std::string &_path); diff --git a/src/Console.cc b/src/Console.cc index d6604448e..c57e68597 100644 --- a/src/Console.cc +++ b/src/Console.cc @@ -261,8 +261,8 @@ void FileLogger::Init(const std::string &_directory, { if (!env(IGN_HOMEDIR, logPath)) { - ignerr << "Missing HOME environment variable." - << "No log file will be generated."; + std::cerr << "Missing HOME environment variable. " + << "No log file will be generated."; return; } logPath = joinPaths(logPath, _directory); @@ -275,7 +275,12 @@ void FileLogger::Init(const std::string &_directory, auto* buf = dynamic_cast(this->rdbuf()); // Create the directory if it doesn't exist. - createDirectories(logPath); + if(!createDirectories(logPath, std::cerr)) + { + std::cerr << "Failed to generate log directories. " + << "No log file will be generated."; + return; + } logPath = joinPaths(logPath, _filename); diff --git a/src/EnumIface_TEST.cc b/src/EnumIface_TEST.cc index 69a70b77b..852e2b7dd 100644 --- a/src/EnumIface_TEST.cc +++ b/src/EnumIface_TEST.cc @@ -41,7 +41,7 @@ IGN_ENUM(myTypeIface, MyType, MY_TYPE_BEGIN, MY_TYPE_END, ///////////////////////////////////////////////// TEST_F(EnumIfaceTest, StringCoversion) { - MyType type; + MyType type = MyType::TYPE2; // Set value from string myTypeIface.Set(type, "TYPE1"); diff --git a/src/Filesystem.cc b/src/Filesystem.cc index b363ad91e..6cf911b7b 100644 --- a/src/Filesystem.cc +++ b/src/Filesystem.cc @@ -465,6 +465,12 @@ bool common::copyDirectory(const std::string &_existingDirname, ///////////////////////////////////////////////// bool common::createDirectories(const std::string &_path) +{ + return createDirectories(_path, ignerr); +} + +///////////////////////////////////////////////// +bool common::createDirectories(const std::string &_path, std::ostream &_errorOut) { size_t index = 0; while (index < _path.size()) @@ -482,8 +488,8 @@ bool common::createDirectories(const std::string &_path) if (mkdir(dir.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) != 0) { #endif - ignerr << "Failed to create directory [" + dir + "]: " - << std::strerror(errno) << std::endl; + _errorOut << "Failed to create directory [" + dir + "]: " + << std::strerror(errno) << std::endl; return false; } } From 5b8f27f388168bce293be6c586bfc09c3a7dd7a5 Mon Sep 17 00:00:00 2001 From: "Addisu Z. Taddese" Date: Mon, 13 Nov 2023 14:39:16 -0600 Subject: [PATCH 23/37] Update github action workflows (#558) * Use on `push` only on stable branches to avoid duplicate runs * Update project automation Signed-off-by: Addisu Z. 
Taddese --- .github/workflows/ci.yml | 12 +++++++++--- .github/workflows/triage.yml | 9 +++------ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 744bfcff3..cd43a3b39 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,6 +1,12 @@ name: Ubuntu CI -on: [push, pull_request] +on: + pull_request: + push: + branches: + - 'ign-common[0-9]' + - 'gz-common[0-9]' + - 'main' jobs: bionic-ci: @@ -8,7 +14,7 @@ jobs: name: Ubuntu Bionic CI steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Compile and test id: ci uses: ignition-tooling/action-ignition-ci@bionic @@ -19,7 +25,7 @@ jobs: name: Ubuntu Focal CI steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Compile and test id: ci uses: ignition-tooling/action-ignition-ci@focal diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml index 736670e0e..2332244bf 100644 --- a/.github/workflows/triage.yml +++ b/.github/workflows/triage.yml @@ -10,10 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Add ticket to inbox - uses: technote-space/create-project-card-action@v1 + uses: actions/add-to-project@v0.5.0 with: - PROJECT: Core development - COLUMN: Inbox - GITHUB_TOKEN: ${{ secrets.TRIAGE_TOKEN }} - CHECK_ORG_PROJECT: true - + project-url: https://github.com/orgs/gazebosim/projects/7 + github-token: ${{ secrets.TRIAGE_TOKEN }} From e899db5487472180ad96967d98376f2abf495207 Mon Sep 17 00:00:00 2001 From: Michael Carroll Date: Mon, 4 Dec 2023 10:51:29 -0600 Subject: [PATCH 24/37] Fix error output when creating directories (#561) Signed-off-by: Michael Carroll --- src/Filesystem.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Filesystem.cc b/src/Filesystem.cc index 6cf911b7b..e8b68cab6 100644 --- a/src/Filesystem.cc +++ b/src/Filesystem.cc @@ -466,11 +466,16 @@ bool common::copyDirectory(const std::string &_existingDirname, ///////////////////////////////////////////////// bool common::createDirectories(const std::string &_path) { - return createDirectories(_path, ignerr); + std::ostringstream ss; + auto ret = createDirectories(_path, ss); + if (!ret) + ignerr << ss.str(); + return ret; } ///////////////////////////////////////////////// -bool common::createDirectories(const std::string &_path, std::ostream &_errorOut) +bool common::createDirectories( + const std::string &_path, std::ostream &_errorOut) { size_t index = 0; while (index < _path.size()) From 680d79777d5c1437e225c38c9e144734fab83285 Mon Sep 17 00:00:00 2001 From: Luca Della Vedova Date: Thu, 14 Dec 2023 22:44:56 +0800 Subject: [PATCH 25/37] Backwards compatible assimp texture name fix (#565) * Backwards compatible assimp texture name fix Signed-off-by: Luca Della Vedova * Fix test Signed-off-by: Luca Della Vedova --------- Signed-off-by: Luca Della Vedova --- graphics/src/AssimpLoader.cc | 7 ++++++- graphics/src/AssimpLoader_TEST.cc | 6 ++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/graphics/src/AssimpLoader.cc b/graphics/src/AssimpLoader.cc index 2eda12480..9e49046ed 100644 --- a/graphics/src/AssimpLoader.cc +++ b/graphics/src/AssimpLoader.cc @@ -584,7 +584,12 @@ ImagePtr AssimpLoader::Implementation::LoadEmbeddedTexture( std::string AssimpLoader::Implementation::GenerateTextureName( const aiScene* _scene, aiMaterial* _mat, const std::string& _type) const { - return ToString(_scene->mRootNode->mName) + "_" + ToString(_mat->GetName()) + +#ifdef 
GZ_ASSIMP_PRE_5_2_0 + auto rootName = _scene->mRootNode->mName; +#else + auto rootName = _scene->mName; +#endif + return ToString(rootName) + "_" + ToString(_mat->GetName()) + "_" + _type; } diff --git a/graphics/src/AssimpLoader_TEST.cc b/graphics/src/AssimpLoader_TEST.cc index 5df30d0f1..5407a595e 100644 --- a/graphics/src/AssimpLoader_TEST.cc +++ b/graphics/src/AssimpLoader_TEST.cc @@ -673,7 +673,13 @@ TEST_F(AssimpLoader, LoadGlTF2BoxWithJPEGTexture) EXPECT_EQ(math::Color(0.4f, 0.4f, 0.4f, 1.0f), mat->Ambient()); EXPECT_EQ(math::Color(1.0f, 1.0f, 1.0f, 1.0f), mat->Diffuse()); EXPECT_EQ(math::Color(0.0f, 0.0f, 0.0f, 1.0f), mat->Specular()); + // Assimp 5.2.0 and above uses the scene name for its texture names, + // older version use the root node instead. +#ifdef GZ_ASSIMP_PRE_5_2_0 EXPECT_EQ("Cube_Material_Diffuse", mat->TextureImage()); +#else + EXPECT_EQ("Scene_Material_Diffuse", mat->TextureImage()); +#endif EXPECT_NE(nullptr, mat->TextureData()); } From c948a9d25c654c0d1f9e8facf2fa9a885b47290d Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Thu, 21 Dec 2023 06:29:29 -0800 Subject: [PATCH 26/37] Update CI badges in README (#566) Signed-off-by: Ian Chen --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 445034321..10ad90336 100644 --- a/README.md +++ b/README.md @@ -9,10 +9,10 @@ Build | Status -- | -- -Test coverage | [![codecov](https://codecov.io/gh/gazebosim/gz-common/branch/main/graph/badge.svg)](https://codecov.io/gh/gazebosim/gz-common) -Ubuntu Focal | [![Build Status](https://build.osrfoundation.org/buildStatus/icon?job=ignition_common-ci-main-focal-amd64)](https://build.osrfoundation.org/job/ignition_common-ci-main-focal-amd64) -Homebrew | [![Build Status](https://build.osrfoundation.org/buildStatus/icon?job=ignition_common-ci-main-homebrew-amd64)](https://build.osrfoundation.org/job/ignition_common-ci-main-homebrew-amd64) -Windows | [![Build Status](https://build.osrfoundation.org/job/ign_common-ci-win/badge/icon)](https://build.osrfoundation.org/job/ign_common-ci-win/) +Test coverage | [![codecov](https://codecov.io/gh/gazebosim/gz-common/tree/gz-common5/graph/badge.svg)](https://codecov.io/gh/gazebosim/gz-common/tree/gz-common5) +Ubuntu Jammy | [![Build Status](https://build.osrfoundation.org/buildStatus/icon?job=gz_common-ci-gz-common5-jammy-amd64)](https://build.osrfoundation.org/job/gz_common-ci-gz-common5-jammy-amd64) +Homebrew | [![Build Status](https://build.osrfoundation.org/buildStatus/icon?job=gz_common-ci-gz-common5-homebrew-amd64)](https://build.osrfoundation.org/job/gz_common-ci-gz-common5-homebrew-amd64) +Windows | [![Build Status](https://build.osrfoundation.org/job/gz_common-5-win/badge/icon)](https://build.osrfoundation.org/job/gz_common-5-win/) Gazebo Common, a component of [Gazebo](https://gazebosim.org), provides a set of libraries that cover many different use cases. 
An audio-visual library supports

From 41a6410c55f2d69737cc895c3e7cd1c8df065bfc Mon Sep 17 00:00:00 2001
From: Michael Carroll
Date: Fri, 5 Jan 2024 16:12:01 -0600
Subject: [PATCH 27/37] =?UTF-8?q?=F0=9F=8E=88=20=203.17.0=20(#567)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Michael Carroll
---
 CMakeLists.txt |  2 +-
 Changelog.md   | 11 +++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index a3f1d19c7..f47eb3105 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.10.2 FATAL_ERROR)
 #============================================================================
 # Initialize the project
 #============================================================================
-project(ignition-common3 VERSION 3.16.0)
+project(ignition-common3 VERSION 3.17.0)

 #============================================================================
 # Find ignition-cmake
diff --git a/Changelog.md b/Changelog.md
index dfd6c1b24..f9de00e17 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -1,5 +1,16 @@
 ## Gazebo Common 3.x

+## Gazebo Common 3.17.0 (2024-01-05)
+
+1. Fix error output when creating directories
+    * [Pull request #561](https://github.com/gazebosim/gz-common/pull/561)
+
+1. Update github action workflows
+    * [Pull request #558](https://github.com/gazebosim/gz-common/pull/558)
+
+1. Fix segfault in case of no write access to log dir
+    * [Pull request #546](https://github.com/gazebosim/gz-common/pull/546)
+
 ## Gazebo Common 3.16.0 (2023-06-05)

 1. Include cstdint to build with GCC 13
     * [Pull request #517](https://github.com/gazebosim/gz-common/pull/517)

From e9dbca79df5d17b659f4f6484b95aafef9aba6cf Mon Sep 17 00:00:00 2001
From: Scott K Logan
Date: Mon, 29 Jan 2024 09:45:00 -0600
Subject: [PATCH 28/37] Clarify GDAL version requirement (#574)

The default behavior of pkg-config seems to be an exact version match.
Signed-off-by: Scott K Logan --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 09f53ebbd..402352a42 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -99,6 +99,7 @@ gz_find_package( # Find GDAL gz_find_package(GDAL VERSION 3.0 PKGCONFIG gdal + PKGCONFIG_VER_COMPARISON >= PRIVATE_FOR geospatial REQUIRED_BY geospatial) From bc24ecec0a9e10e8aa5a195755a7d14cd882ae29 Mon Sep 17 00:00:00 2001 From: Michael Carroll Date: Mon, 29 Jan 2024 10:09:40 -0600 Subject: [PATCH 29/37] Multiple memory cleanup fixes (#571) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Michael Carroll Co-authored-by: Alejandro Hernández Cordero --- graphics/src/AssimpLoader_TEST.cc | 26 ++++++++++++++++++- graphics/src/ColladaExporter_TEST.cc | 9 +++++++ graphics/src/ColladaLoader.cc | 35 ++++++++++++++++++++------ graphics/src/ColladaLoader_TEST.cc | 16 ++++++++++++ graphics/src/Image.cc | 11 +++++++- graphics/src/OBJLoader_TEST.cc | 5 ++++ graphics/src/STLLoader_TEST.cc | 2 ++ graphics/src/Skeleton.cc | 5 ++-- graphics/src/SkeletonAnimation_TEST.cc | 2 ++ graphics/src/SkeletonNode.cc | 4 +-- 10 files changed, 100 insertions(+), 15 deletions(-) diff --git a/graphics/src/AssimpLoader_TEST.cc b/graphics/src/AssimpLoader_TEST.cc index 5407a595e..40a99f7f4 100644 --- a/graphics/src/AssimpLoader_TEST.cc +++ b/graphics/src/AssimpLoader_TEST.cc @@ -49,6 +49,7 @@ TEST_F(AssimpLoader, LoadBox) // Make sure we can read a submesh name EXPECT_STREQ("Cube", mesh->SubMeshByIndex(0).lock()->Name().c_str()); + delete mesh; } ///////////////////////////////////////////////// @@ -104,6 +105,8 @@ TEST_F(AssimpLoader, Material) matOpaque->BlendFactors(srcFactor, dstFactor); EXPECT_DOUBLE_EQ(1.0, srcFactor); EXPECT_DOUBLE_EQ(0.0, dstFactor); + delete mesh; + delete meshOpaque; } ///////////////////////////////////////////////// @@ -155,6 +158,7 @@ TEST_F(AssimpLoader, ShareVertices) } } } + delete mesh; } ///////////////////////////////////////////////// @@ -164,6 +168,7 @@ TEST_F(AssimpLoader, LoadZeroCount) common::Mesh *mesh = loader.Load( common::testing::TestFile("data", "zero_count.dae")); ASSERT_TRUE(mesh); + delete mesh; } ///////////////////////////////////////////////// @@ -251,6 +256,7 @@ TEST_F(AssimpLoader, TexCoordSets) subMeshB->SetTexCoordBySet(2u, math::Vector2d(0.1, 0.2), 1u); EXPECT_EQ(math::Vector2d(0.1, 0.2), subMeshB->TexCoordBySet(2u, 1u)); + delete mesh; } // Test fails for assimp below 5.2.0 @@ -289,6 +295,7 @@ TEST_F(AssimpLoader, LoadBoxWithAnimationOutsideSkeleton) 0, 0, 1, 0, 0, 0, 0, 1); EXPECT_EQ(expectedTrans, poseEnd.at("Armature")); + delete mesh; } #endif @@ -308,6 +315,7 @@ TEST_F(AssimpLoader, LoadBoxInstControllerWithoutSkeleton) common::SkeletonPtr skeleton = mesh->MeshSkeleton(); EXPECT_LT(0u, skeleton->NodeCount()); EXPECT_NE(nullptr, skeleton->NodeById("Armature_Bone")); + delete mesh; } ///////////////////////////////////////////////// @@ -335,6 +343,7 @@ TEST_F(AssimpLoader, LoadBoxMultipleInstControllers) common::SkeletonPtr skeleton = mesh->MeshSkeleton(); EXPECT_LT(0u, skeleton->NodeCount()); EXPECT_NE(nullptr, skeleton->NodeById("Armature_Bone")); + delete mesh; } ///////////////////////////////////////////////// @@ -374,6 +383,7 @@ TEST_F(AssimpLoader, LoadBoxNestedAnimation) 0, 0, 1, 0, 0, 0, 0, 1); EXPECT_EQ(expectedTrans, poseEnd.at("Armature_Bone")); + delete mesh; } ///////////////////////////////////////////////// @@ -392,6 +402,7 @@ TEST_F(AssimpLoader, 
LoadBoxWithDefaultStride) ASSERT_NE(mesh->MeshSkeleton(), nullptr); // TODO(luca) not working, investigate // ASSERT_EQ(1u, mesh->MeshSkeleton()->AnimationCount()); + delete mesh; } ///////////////////////////////////////////////// @@ -409,6 +420,7 @@ TEST_F(AssimpLoader, LoadBoxWithMultipleGeoms) ASSERT_EQ(2u, mesh->SubMeshCount()); EXPECT_EQ(24u, mesh->SubMeshByIndex(0).lock()->NodeAssignmentsCount()); EXPECT_EQ(0u, mesh->SubMeshByIndex(1).lock()->NodeAssignmentsCount()); + delete mesh; } ///////////////////////////////////////////////// @@ -435,6 +447,7 @@ TEST_F(AssimpLoader, LoadBoxWithHierarchicalNodes) // nested node with name EXPECT_EQ("StaticCubeNested", mesh->SubMeshByIndex(4).lock()->Name()); + delete mesh; } ///////////////////////////////////////////////// @@ -448,6 +461,7 @@ TEST_F(AssimpLoader, MergeBoxWithDoubleSkeleton) // The two skeletons have been joined and their root is the // animation root, called Scene EXPECT_EQ(skeleton_ptr->RootNode()->Name(), std::string("Scene")); + delete mesh; } // For assimp below 5.2.0 mesh loading fails because of @@ -487,6 +501,7 @@ TEST_F(AssimpLoader, LoadCylinderAnimatedFrom3dsMax) // EXPECT_EQ("Bone02", anim->Name()); EXPECT_EQ(1u, anim->NodeCount()); EXPECT_TRUE(anim->HasNode("Bone02")); + delete mesh; } #endif @@ -521,6 +536,7 @@ TEST_F(AssimpLoader, LoadObjBox) EXPECT_EQ(mat->Diffuse(), math::Color(0.512f, 0.512f, 0.512f, 1.0f)); EXPECT_EQ(mat->Specular(), math::Color(0.25, 0.25, 0.25, 1.0)); EXPECT_DOUBLE_EQ(mat->Transparency(), 0.0); + delete mesh; } @@ -536,6 +552,7 @@ TEST_F(AssimpLoader, ObjInvalidMaterial) common::Mesh *mesh = loader.Load(meshFilename); EXPECT_TRUE(mesh != nullptr); + delete mesh; } ///////////////////////////////////////////////// @@ -550,6 +567,7 @@ TEST_F(AssimpLoader, NonExistingMesh) common::Mesh *mesh = loader.Load(meshFilename); EXPECT_EQ(mesh->SubMeshCount(), 0); + delete mesh; } ///////////////////////////////////////////////// @@ -584,6 +602,7 @@ TEST_F(AssimpLoader, LoadFbxBox) EXPECT_EQ(mat->Diffuse(), math::Color(0.8f, 0.8f, 0.8f, 1.0f)); EXPECT_EQ(mat->Specular(), math::Color(0.8f, 0.8f, 0.8f, 1.0f)); EXPECT_DOUBLE_EQ(mat->Transparency(), 0.0); + delete mesh; } ///////////////////////////////////////////////// @@ -618,6 +637,7 @@ TEST_F(AssimpLoader, LoadGlTF2Box) EXPECT_EQ(mat->Diffuse(), math::Color(0.8f, 0.8f, 0.8f, 1.0f)); EXPECT_EQ(mat->Specular(), math::Color(0.0f, 0.0f, 0.0f, 1.0f)); EXPECT_DOUBLE_EQ(mat->Transparency(), 0.0); + delete mesh; } ///////////////////////////////////////////////// @@ -642,6 +662,7 @@ TEST_F(AssimpLoader, LoadGlTF2BoxExternalTexture) auto testTextureFile = common::testing::TestFile("data/gltf", "PurpleCube_Diffuse.png"); EXPECT_EQ(testTextureFile, mat->TextureImage()); + delete mesh; } ///////////////////////////////////////////////// @@ -681,6 +702,7 @@ TEST_F(AssimpLoader, LoadGlTF2BoxWithJPEGTexture) EXPECT_EQ("Scene_Material_Diffuse", mat->TextureImage()); #endif EXPECT_NE(nullptr, mat->TextureData()); + delete mesh; } ///////////////////////////////////////////////// @@ -733,7 +755,6 @@ TEST_F(AssimpLoader, LoadGlbPbrAsset) EXPECT_EQ(img->Pixel(0, 0), math::Color(0.0f, 0.0f, 0.0f, 1.0f)); EXPECT_EQ(img->Pixel(100, 100), math::Color(1.0f, 1.0f, 1.0f, 1.0f)); - EXPECT_NE(pbr->NormalMapData(), nullptr); // Metallic roughness and alpha from textures only works in assimp > 5.2.0 #ifndef GZ_ASSIMP_PRE_5_2_0 @@ -765,6 +786,7 @@ TEST_F(AssimpLoader, LoadGlbPbrAsset) EXPECT_STREQ("Action1", skel->Animation(0)->Name().c_str()); EXPECT_STREQ("Action2", 
skel->Animation(1)->Name().c_str()); EXPECT_STREQ("Action3", skel->Animation(2)->Name().c_str()); + delete mesh; } ///////////////////////////////////////////////// @@ -782,6 +804,7 @@ TEST_F(AssimpLoader, CheckNonRootDisplacement) EXPECT_EQ(nullptr, rootNode); auto xDisplacement = skelAnim->XDisplacement(); ASSERT_TRUE(xDisplacement); + delete mesh; } ///////////////////////////////////////////////// @@ -825,4 +848,5 @@ TEST_F(AssimpLoader, LoadGLTF2Triangle) EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoord(0u)); EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoord(1u)); EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoord(2u)); + delete mesh; } diff --git a/graphics/src/ColladaExporter_TEST.cc b/graphics/src/ColladaExporter_TEST.cc index a6c7eb134..5b4851d2e 100644 --- a/graphics/src/ColladaExporter_TEST.cc +++ b/graphics/src/ColladaExporter_TEST.cc @@ -112,6 +112,8 @@ TEST_F(ColladaExporter, ExportBox) meshReloaded->SubMeshByIndex(i).lock()->TexCoord(j)); } } + delete meshOriginal; + delete meshReloaded; } ///////////////////////////////////////////////// @@ -206,6 +208,8 @@ TEST_F(ColladaExporter, ExportCordlessDrill) meshReloaded->SubMeshByIndex(i).lock()->TexCoord(j)); } } + delete meshOriginal; + delete meshReloaded; } ///////////////////////////////////////////////// @@ -319,6 +323,10 @@ TEST_F(ColladaExporter, ExportMeshWithSubmeshes) EXPECT_EQ(outMesh.NormalCount(), meshReloaded->NormalCount()); EXPECT_EQ(outMesh.TexCoordCount(), meshReloaded->TexCoordCount()); + + delete boxMesh; + delete drillMesh; + delete meshReloaded; } TEST_F(ColladaExporter, ExportLights) @@ -448,4 +456,5 @@ TEST_F(ColladaExporter, ExportLights) } } EXPECT_EQ(node_with_light_count, 3); + delete meshOriginal; } diff --git a/graphics/src/ColladaLoader.cc b/graphics/src/ColladaLoader.cc index cbcb0bf70..2040222e6 100644 --- a/graphics/src/ColladaLoader.cc +++ b/graphics/src/ColladaLoader.cc @@ -366,9 +366,7 @@ ColladaLoader::ColladaLoader() } ////////////////////////////////////////////////// -ColladaLoader::~ColladaLoader() -{ -} +ColladaLoader::~ColladaLoader() = default; ////////////////////////////////////////////////// Mesh *ColladaLoader::Load(const std::string &_filename) @@ -772,13 +770,16 @@ void ColladaLoader::Implementation::LoadController( { SkeletonNode *rootSkelNode = this->LoadSkeletonNodes(rootNodeXml, nullptr); + if (nullptr == skeleton) { - skeleton = SkeletonPtr(new Skeleton(rootSkelNode)); + skeleton = std::make_shared(rootSkelNode); _mesh->SetSkeleton(skeleton); } else if (nullptr != rootSkelNode) + { this->MergeSkeleton(skeleton, rootSkelNode); + } } if (nullptr == skeleton) { @@ -1191,7 +1192,7 @@ SkeletonNode *ColladaLoader::Implementation::LoadSkeletonNodes( return nullptr; } - auto node = this->LoadSingleSkeletonNode(_xml, _parent); + auto *node = this->LoadSingleSkeletonNode(_xml, _parent); this->SetSkeletonNodeTransform(_xml, node); auto childXml = _xml->FirstChildElement("node"); @@ -2800,8 +2801,14 @@ void ColladaLoader::Implementation::MergeSkeleton(SkeletonPtr _skeleton, if (currentRoot->Id() == _mergeNode->Id()) return; - if (_mergeNode->ChildById(currentRoot->Id())) + auto *rootInMergeNode = _mergeNode->ChildById(currentRoot->Id()); + + if (rootInMergeNode != nullptr) { + if (rootInMergeNode != currentRoot) + { + delete currentRoot; + } _skeleton->RootNode(_mergeNode); return; } @@ -2822,8 +2829,21 @@ void ColladaLoader::Implementation::MergeSkeleton(SkeletonPtr _skeleton, } if (mergeNodeContainsRoot) { + std::function delete_children; + delete_children = + 
[&delete_children](SkeletonNode *node)-> void { + for (size_t ii = 0; ii < node->ChildCount(); ++ii) + { + auto *child = node->Child(ii); + delete_children(child); + delete child; + } + }; + + // Since we are replacing the whole tree, recursively clean up + // the existing skeleton nodes _skeleton->RootNode(_mergeNode); - // TODO(anyone) since we are replacing the whole tree delete the old one + delete_children(currentRoot); delete currentRoot; return; } @@ -2834,6 +2854,7 @@ void ColladaLoader::Implementation::MergeSkeleton(SkeletonPtr _skeleton, dummyRoot = new SkeletonNode(nullptr, "dummy-root", "dummy-root"); } + if (dummyRoot != currentRoot) { dummyRoot->AddChild(currentRoot); diff --git a/graphics/src/ColladaLoader_TEST.cc b/graphics/src/ColladaLoader_TEST.cc index 63e0fe75d..e8ea61f80 100644 --- a/graphics/src/ColladaLoader_TEST.cc +++ b/graphics/src/ColladaLoader_TEST.cc @@ -49,6 +49,7 @@ TEST_F(ColladaLoader, LoadBox) // Make sure we can read a submesh name EXPECT_STREQ("Cube", mesh->SubMeshByIndex(0).lock()->Name().c_str()); + delete mesh; } ///////////////////////////////////////////////// @@ -100,6 +101,7 @@ TEST_F(ColladaLoader, ShareVertices) } } } + delete mesh; } ///////////////////////////////////////////////// @@ -124,6 +126,7 @@ TEST_F(ColladaLoader, LoadZeroCount) EXPECT_NE(log.find("Normal source has a float_array with a count of zero"), std::string::npos); #endif + delete mesh; } ///////////////////////////////////////////////// @@ -179,6 +182,9 @@ TEST_F(ColladaLoader, Material) matOpaque->BlendFactors(srcFactor, dstFactor); EXPECT_DOUBLE_EQ(1.0, srcFactor); EXPECT_DOUBLE_EQ(0.0, dstFactor); + + delete mesh; + delete meshOpaque; } ///////////////////////////////////////////////// @@ -266,6 +272,7 @@ TEST_F(ColladaLoader, TexCoordSets) subMeshB->SetTexCoordBySet(2u, math::Vector2d(0.1, 0.2), 1u); EXPECT_EQ(math::Vector2d(0.1, 0.2), subMeshB->TexCoordBySet(2u, 1u)); + delete mesh; } ///////////////////////////////////////////////// @@ -302,6 +309,7 @@ TEST_F(ColladaLoader, LoadBoxWithAnimationOutsideSkeleton) 0, 0, 1, 0, 0, 0, 0, 1); EXPECT_EQ(expectedTrans, poseEnd.at("Armature")); + delete mesh; } ///////////////////////////////////////////////// @@ -320,6 +328,7 @@ TEST_F(ColladaLoader, LoadBoxInstControllerWithoutSkeleton) common::SkeletonPtr skeleton = mesh->MeshSkeleton(); EXPECT_LT(0u, skeleton->NodeCount()); EXPECT_NE(nullptr, skeleton->NodeById("Armature_Bone")); + delete mesh; } ///////////////////////////////////////////////// @@ -347,6 +356,7 @@ TEST_F(ColladaLoader, LoadBoxMultipleInstControllers) common::SkeletonPtr skeleton = mesh->MeshSkeleton(); EXPECT_NE(nullptr, skeleton->NodeById("Armature_Bone")); EXPECT_NE(nullptr, skeleton->NodeById("Armature_Bone2")); + delete mesh; } ///////////////////////////////////////////////// @@ -383,6 +393,7 @@ TEST_F(ColladaLoader, LoadBoxNestedAnimation) 0, 0, 1, 0, 0, 0, 0, 1); EXPECT_EQ(expectedTrans, poseEnd.at("Bone")); + delete mesh; } ///////////////////////////////////////////////// @@ -398,6 +409,7 @@ TEST_F(ColladaLoader, LoadBoxWithDefaultStride) EXPECT_EQ(1u, mesh->MaterialCount()); EXPECT_EQ(35u, mesh->TexCoordCount()); ASSERT_EQ(1u, mesh->MeshSkeleton()->AnimationCount()); + delete mesh; } ///////////////////////////////////////////////// @@ -415,6 +427,7 @@ TEST_F(ColladaLoader, LoadBoxWithMultipleGeoms) ASSERT_EQ(2u, mesh->SubMeshCount()); EXPECT_EQ(24u, mesh->SubMeshByIndex(0).lock()->NodeAssignmentsCount()); EXPECT_EQ(0u, mesh->SubMeshByIndex(1).lock()->NodeAssignmentsCount()); + delete mesh; } 
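// The cleanup fixes in these loader tests all follow one ownership rule: a
// loader's Load() call returns a raw Mesh pointer that the caller owns, which
// is why each test now ends with "delete mesh;". Below is a minimal sketch of
// the same rule expressed with RAII; the helper name and exact header paths
// are assumptions for illustration only and are not part of this patch.

#include <memory>
#include <string>

#include <gz/common/ColladaLoader.hh>
#include <gz/common/Mesh.hh>

// Hypothetical helper: load a mesh and hand ownership to std::unique_ptr.
std::unique_ptr<gz::common::Mesh> LoadOwned(const std::string &_path)
{
  gz::common::ColladaLoader loader;
  // Load() returns a caller-owned pointer (or nullptr on failure); the
  // unique_ptr releases it automatically when it goes out of scope.
  return std::unique_ptr<gz::common::Mesh>(loader.Load(_path));
}

// A test written against a helper like this would not need the explicit
// "delete mesh;" calls that this patch adds at the end of every test body.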
///////////////////////////////////////////////// @@ -440,6 +453,7 @@ TEST_F(ColladaLoader, LoadBoxWithHierarchicalNodes) // Parent of nested node with name EXPECT_EQ("StaticCubeParent2", mesh->SubMeshByIndex(4).lock()->Name()); + delete mesh; } ///////////////////////////////////////////////// @@ -453,6 +467,7 @@ TEST_F(ColladaLoader, MergeBoxWithDoubleSkeleton) // The two skeletons have been joined and their root is the // animation root, called Armature EXPECT_EQ(skeleton_ptr->RootNode()->Name(), std::string("Armature")); + delete mesh; } ///////////////////////////////////////////////// @@ -488,4 +503,5 @@ TEST_F(ColladaLoader, LoadCylinderAnimatedFrom3dsMax) EXPECT_EQ("Bone02", anim->Name()); EXPECT_EQ(1u, anim->NodeCount()); EXPECT_TRUE(anim->HasNode("Bone02")); + delete mesh; } diff --git a/graphics/src/Image.cc b/graphics/src/Image.cc index 1bbdf74ff..cfd01ffa1 100644 --- a/graphics/src/Image.cc +++ b/graphics/src/Image.cc @@ -688,8 +688,17 @@ BOOL Image::Implementation::PixelIndex( ////////////////////////////////////////////////// void Image::Rescale(int _width, int _height) { - this->dataPtr->bitmap = FreeImage_Rescale( + auto *scaled = FreeImage_Rescale( this->dataPtr->bitmap, _width, _height, FILTER_LANCZOS3); + + if (!scaled) + { + gzerr << "Failed to rescale image\n"; + return; + } + + FreeImage_Unload(this->dataPtr->bitmap); + this->dataPtr->bitmap = scaled; } ////////////////////////////////////////////////// diff --git a/graphics/src/OBJLoader_TEST.cc b/graphics/src/OBJLoader_TEST.cc index aa2b79411..8ed7571f8 100644 --- a/graphics/src/OBJLoader_TEST.cc +++ b/graphics/src/OBJLoader_TEST.cc @@ -59,6 +59,8 @@ TEST_F(OBJLoaderTest, LoadObjBox) EXPECT_EQ(mat->Diffuse(), math::Color(0.512f, 0.512f, 0.512f, 1.0f)); EXPECT_EQ(mat->Specular(), math::Color(0.25, 0.25, 0.25, 1.0)); EXPECT_DOUBLE_EQ(mat->Transparency(), 0.0); + + delete mesh; } ///////////////////////////////////////////////// @@ -73,6 +75,7 @@ TEST_F(OBJLoaderTest, InvalidMaterial) gz::common::Mesh *mesh = objLoader.Load(meshFilename); EXPECT_TRUE(mesh != nullptr); + delete mesh; } ///////////////////////////////////////////////// @@ -104,6 +107,7 @@ TEST_F(OBJLoaderTest, PBR) EXPECT_EQ("LightDome_Metalness.png", pbr->MetalnessMap()); EXPECT_EQ("LightDome_Roughness.png", pbr->RoughnessMap()); EXPECT_EQ("LightDome_Normal.png", pbr->NormalMap()); + delete mesh; } // load obj file exported by blender - it shoves pbr maps into @@ -131,5 +135,6 @@ TEST_F(OBJLoaderTest, PBR) EXPECT_EQ("mesh_Metal.png", pbr->MetalnessMap()); EXPECT_EQ("mesh_Rough.png", pbr->RoughnessMap()); EXPECT_EQ("mesh_Normal.png", pbr->NormalMap()); + delete mesh; } } diff --git a/graphics/src/STLLoader_TEST.cc b/graphics/src/STLLoader_TEST.cc index 7fd1e1d59..15189896c 100644 --- a/graphics/src/STLLoader_TEST.cc +++ b/graphics/src/STLLoader_TEST.cc @@ -61,6 +61,7 @@ TEST_F(STLLoaderTest, LoadSTL) EXPECT_EQ(math::Vector3d(0, 0, -1), subMesh->Normal(2u)); EXPECT_STREQ("", mesh->SubMeshByIndex(0).lock()->Name().c_str()); + delete mesh; mesh = loader.Load( common::testing::TestFile("data", "cube_binary.stl")); @@ -88,4 +89,5 @@ TEST_F(STLLoaderTest, LoadSTL) EXPECT_EQ(math::Vector3d(0, 0, -1), subMesh->Normal(2u)); EXPECT_STREQ("", mesh->SubMeshByIndex(0).lock()->Name().c_str()); + delete mesh; } diff --git a/graphics/src/Skeleton.cc b/graphics/src/Skeleton.cc index cf0bfb954..f63f4f0bc 100644 --- a/graphics/src/Skeleton.cc +++ b/graphics/src/Skeleton.cc @@ -72,10 +72,9 @@ Skeleton::Skeleton() ////////////////////////////////////////////////// 
Skeleton::Skeleton(SkeletonNode *_root) -: dataPtr(gz::utils::MakeUniqueImpl()) +: Skeleton() { - this->dataPtr->root = _root; - this->BuildNodeMap(); + this->RootNode(_root); } ////////////////////////////////////////////////// diff --git a/graphics/src/SkeletonAnimation_TEST.cc b/graphics/src/SkeletonAnimation_TEST.cc index 61e748696..eecb8a367 100644 --- a/graphics/src/SkeletonAnimation_TEST.cc +++ b/graphics/src/SkeletonAnimation_TEST.cc @@ -32,4 +32,6 @@ TEST_F(SkeletonAnimation, CheckNoXDisplacement) new common::SkeletonAnimation("emptyAnimation"); auto xDisplacement = skelAnim->XDisplacement(); ASSERT_FALSE(xDisplacement); + + delete skelAnim; } diff --git a/graphics/src/SkeletonNode.cc b/graphics/src/SkeletonNode.cc index dc562acfa..b3e3e0c12 100644 --- a/graphics/src/SkeletonNode.cc +++ b/graphics/src/SkeletonNode.cc @@ -87,9 +87,7 @@ SkeletonNode::SkeletonNode(SkeletonNode *_parent, } ////////////////////////////////////////////////// -SkeletonNode::~SkeletonNode() -{ -} +SkeletonNode::~SkeletonNode() = default; ////////////////////////////////////////////////// void SkeletonNode::Name(const std::string &_name) From 3b815419c2f5c69dffc05971f8bc8556de332a49 Mon Sep 17 00:00:00 2001 From: Michael Carroll Date: Mon, 29 Jan 2024 11:30:37 -0600 Subject: [PATCH 30/37] Be louder when graphics is missing for geospatial (#573) Signed-off-by: Michael Carroll --- CMakeLists.txt | 2 +- geospatial/src/CMakeLists.txt | 45 +++++++++++++++++++---------------- 2 files changed, 25 insertions(+), 22 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 402352a42..b7697c738 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -138,7 +138,7 @@ configure_file("${PROJECT_SOURCE_DIR}/cppcheck.suppress.in" ${PROJECT_BINARY_DIR}/cppcheck.suppress) gz_configure_build(QUIT_IF_BUILD_ERRORS - COMPONENTS av events geospatial graphics io profiler testing) + COMPONENTS av events graphics geospatial io profiler testing) #============================================================================ # Create package information diff --git a/geospatial/src/CMakeLists.txt b/geospatial/src/CMakeLists.txt index f9fdcedc8..bf528bee7 100644 --- a/geospatial/src/CMakeLists.txt +++ b/geospatial/src/CMakeLists.txt @@ -1,24 +1,27 @@ -gz_get_libsources_and_unittests(sources gtest_sources) +if (TARGET ${PROJECT_LIBRARY_TARGET_NAME}-graphics) + gz_get_libsources_and_unittests(sources gtest_sources) + gz_add_component(geospatial + SOURCES ${sources} + DEPENDS_ON_COMPONENTS graphics + GET_TARGET_NAME geospatial_target) -gz_add_component(geospatial - SOURCES ${sources} - DEPENDS_ON_COMPONENTS graphics - GET_TARGET_NAME geospatial_target) + target_link_libraries(${geospatial_target} + PUBLIC + ${PROJECT_LIBRARY_TARGET_NAME}-graphics + gz-math${GZ_MATH_VER}::gz-math${GZ_MATH_VER} + gz-utils${GZ_UTILS_VER}::gz-utils${GZ_UTILS_VER} + PRIVATE + ${GDAL_LIBRARY}) -target_link_libraries(${geospatial_target} - PUBLIC - ${PROJECT_LIBRARY_TARGET_NAME}-graphics - gz-math${GZ_MATH_VER}::gz-math${GZ_MATH_VER} - gz-utils${GZ_UTILS_VER}::gz-utils${GZ_UTILS_VER} - PRIVATE - ${GDAL_LIBRARY}) + target_include_directories(${geospatial_target} + PRIVATE + ${GDAL_INCLUDE_DIR}) -target_include_directories(${geospatial_target} - PRIVATE - ${GDAL_INCLUDE_DIR}) - -gz_build_tests(TYPE UNIT SOURCES ${gtest_sources} - LIB_DEPS - ${geospatial_target} - gz-common${GZ_COMMON_VER}-testing -) + gz_build_tests(TYPE UNIT SOURCES ${gtest_sources} + LIB_DEPS + ${geospatial_target} + gz-common${GZ_COMMON_VER}-testing + ) +else() + message(WARNING "Skipping 
component [geospatial]: Missing component [graphics].\n ^~~~~ Set SKIP_geospatial=true in cmake to suppress this warning.") +endif() From 3374a3b148e6858be571e5860acd2e51414ac325 Mon Sep 17 00:00:00 2001 From: Michael Carroll Date: Mon, 5 Feb 2024 11:52:38 -0600 Subject: [PATCH 31/37] Prepare for 5.5.0 release (#575) Signed-off-by: Michael Carroll --- CMakeLists.txt | 2 +- Changelog.md | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b7697c738..199b4031b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.10.2 FATAL_ERROR) #============================================================================ # Initialize the project #============================================================================ -project(gz-common5 VERSION 5.4.2) +project(gz-common5 VERSION 5.5.0) set(GZ_COMMON_VER ${PROJECT_VERSION_MAJOR}) #============================================================================ diff --git a/Changelog.md b/Changelog.md index c1f38cc61..0234f1e70 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,5 +1,57 @@ ## Gazebo Common 5.x +## Gazebo Common 5.5.0 (2024-02-26) + +1. Be louder when graphics is missing for geospatial + * [Pull request #573](https://github.com/gazebosim/gz-common/pull/573) + +1. Multiple memory cleanup fixes + * [Pull request #571](https://github.com/gazebosim/gz-common/pull/571) + +1. Clarify GDAL version requirement + * [Pull request #574](https://github.com/gazebosim/gz-common/pull/574) + +1. 🎈 3.17.0 + * [Pull request #567](https://github.com/gazebosim/gz-common/pull/567) + +1. Update CI badges in README + * [Pull request #566](https://github.com/gazebosim/gz-common/pull/566) + +1. Backwards compatible assimp texture name fix + * [Pull request #565](https://github.com/gazebosim/gz-common/pull/565) + +1. Fix error output when creating directories + * [Pull request #561](https://github.com/gazebosim/gz-common/pull/561) + +1. Update github action workflows + * [Pull request #558](https://github.com/gazebosim/gz-common/pull/558) + +1. Fix segfault in case of no write access to log dir + * [Pull request #546](https://github.com/gazebosim/gz-common/pull/546) + +1. ign -> gz + * [Pull request #547](https://github.com/gazebosim/gz-common/pull/547) + +1. Support loading glb with compressed jpeg textures + * [Pull request #545](https://github.com/gazebosim/gz-common/pull/545) + +1. Fix glTF / glb root node transform + * [Pull request #543](https://github.com/gazebosim/gz-common/pull/543) + +1. EnumIface: suppress deprecation warning + * [Pull request #540](https://github.com/gazebosim/gz-common/pull/540) + +1. Prevent loading lightmaps if mesh is a glb file that has an occlusion-metallic-roughness texture + * [Pull request #538](https://github.com/gazebosim/gz-common/pull/538) + +1. 🎈 3.16.0 + * [Pull request #519](https://github.com/gazebosim/gz-common/pull/519) + +1. Fix cstdint with GCC 13 + * [Pull request #528](https://github.com/gazebosim/gz-common/pull/528) + * [Pull request #517](https://github.com/gazebosim/gz-common/pull/517) + * [Pull request #513](https://github.com/gazebosim/gz-common/pull/513) + ## Gazebo Common 5.4.2 (2023-09-26) 1. Documentation fixes From 3571cb4f425e34f5619f2ee7e3159c6b3f28c466 Mon Sep 17 00:00:00 2001 From: "Addisu Z. 
Taddese" Date: Mon, 4 Mar 2024 02:59:09 -0600 Subject: [PATCH 32/37] Fix noise issue that appears in certain image textures (#578) The issue is that if a pixel's color value in 8 bit format is close 1 (out of 255), when assigned to `math::Color`, `math::Color::Clamp` assumes it to be in the floating point range [0.0 - 1.0] instead of 8 bit unsigned range of [0 - 255]. The fix here is to convert colors to floating point at the source instead of relying on `Color::Clamp`. Signed-off-by: Addisu Z. Taddese --- graphics/src/Image.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/graphics/src/Image.cc b/graphics/src/Image.cc index cfd01ffa1..9d5552ad4 100644 --- a/graphics/src/Image.cc +++ b/graphics/src/Image.cc @@ -533,7 +533,8 @@ math::Color Image::Pixel(unsigned int _x, unsigned int _y) const << _x << " " << _y << "] \n"; return clr; } - clr.Set(firgb.rgbRed, firgb.rgbGreen, firgb.rgbBlue); + clr.Set(firgb.rgbRed / 255.0f, firgb.rgbGreen / 255.0f, + firgb.rgbBlue / 255.0f); } else { @@ -606,7 +607,8 @@ math::Color Image::MaxColor() const << x << " " << y << "] \n"; continue; } - clr.Set(firgb.rgbRed, firgb.rgbGreen, firgb.rgbBlue); + clr.Set(firgb.rgbRed / 255.0f, firgb.rgbGreen / 255.0f, + firgb.rgbBlue / 255.0f); if (clr.R() + clr.G() + clr.B() > maxClr.R() + maxClr.G() + maxClr.B()) { From b3ba7ab735adaacd8997701d413d6bc8f4cbee2e Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Mon, 4 Mar 2024 07:13:32 -0800 Subject: [PATCH 33/37] Extend AssimpLoader to parse material transmission factor (#577) Signed-off-by: Ian Chen Co-authored-by: Michael Carroll --- graphics/src/AssimpLoader.cc | 23 +++++++++++++++++++++-- graphics/src/AssimpLoader_TEST.cc | 24 ++++++++++++++++++++++++ graphics/src/CMakeLists.txt | 10 ++++++++-- test/data/box_transmission.glb | Bin 0 -> 2140 bytes 4 files changed, 53 insertions(+), 4 deletions(-) create mode 100644 test/data/box_transmission.glb diff --git a/graphics/src/AssimpLoader.cc b/graphics/src/AssimpLoader.cc index 9e49046ed..24b2f2faa 100644 --- a/graphics/src/AssimpLoader.cc +++ b/graphics/src/AssimpLoader.cc @@ -357,8 +357,26 @@ MaterialPtr AssimpLoader::Implementation::CreateMaterial( } float opacity = 1.0; ret = assimpMat->Get(AI_MATKEY_OPACITY, opacity); - mat->SetTransparency(1.0 - opacity); - mat->SetBlendFactors(opacity, 1.0 - opacity); + if (ret == AI_SUCCESS) + { + mat->SetTransparency(1.0 - opacity); + mat->SetBlendFactors(opacity, 1.0 - opacity); + } + +#ifndef GZ_ASSIMP_PRE_5_1_0 + // basic support for transmission - currently just overrides opacity + // \todo(iche033) The transmission factor can be used with volume + // material extension to simulate effects like refraction + // so consider also extending support for other properties like + // AI_MATKEY_VOLUME_THICKNESS_FACTOR + float transmission = 0.0; + ret = assimpMat->Get(AI_MATKEY_TRANSMISSION_FACTOR, transmission); + if (ret == AI_SUCCESS) + { + mat->SetTransparency(transmission); + } +#endif + // TODO(luca) more than one texture, Gazebo assumes UV index 0 Pbr pbr; aiString texturePath(_path.c_str()); @@ -593,6 +611,7 @@ std::string AssimpLoader::Implementation::GenerateTextureName( "_" + _type; } +////////////////////////////////////////////////// SubMesh AssimpLoader::Implementation::CreateSubMesh( const aiMesh* _assimpMesh, const math::Matrix4d& _transform) const { diff --git a/graphics/src/AssimpLoader_TEST.cc b/graphics/src/AssimpLoader_TEST.cc index 40a99f7f4..6be2e5798 100644 --- a/graphics/src/AssimpLoader_TEST.cc +++ b/graphics/src/AssimpLoader_TEST.cc @@ 
-665,6 +665,30 @@ TEST_F(AssimpLoader, LoadGlTF2BoxExternalTexture) delete mesh; } +///////////////////////////////////////////////// +// Open a gltf mesh with transmission extension +TEST_F(AssimpLoader, LoadGlTF2BoxTransmission) +{ +#ifndef GZ_ASSIMP_PRE_5_1_0 + common::AssimpLoader loader; + common::Mesh *mesh = loader.Load( + common::testing::TestFile("data", "box_transmission.glb")); + + EXPECT_STREQ("unknown", mesh->Name().c_str()); + + // Make sure we can read the submesh name + EXPECT_STREQ("Cube", mesh->SubMeshByIndex(0).lock()->Name().c_str()); + + EXPECT_EQ(mesh->MaterialCount(), 1u); + + const common::MaterialPtr mat = mesh->MaterialByIndex(0u); + ASSERT_TRUE(mat.get()); + // transmission currently modeled as transparency + EXPECT_FLOAT_EQ(0.1, mat->Transparency()); + delete mesh; +#endif +} + ///////////////////////////////////////////////// // This test loads a box glb mesh with embedded compressed jpeg texture TEST_F(AssimpLoader, LoadGlTF2BoxWithJPEGTexture) diff --git a/graphics/src/CMakeLists.txt b/graphics/src/CMakeLists.txt index aa832387e..b153bf67e 100644 --- a/graphics/src/CMakeLists.txt +++ b/graphics/src/CMakeLists.txt @@ -29,16 +29,22 @@ gz_build_tests( ) # Assimp doesn't offer preprocessor version, use cmake to set a compatibility -# mode for versions below 5.2.0 +# mode for versions below 5.2.0 and 5.1.0 if(${GzAssimp_VERSION} STRLESS "5.2.0") message("Warning, assimp below 5.2.0 detected, setting compatibility mode") target_compile_definitions(${graphics_target} PRIVATE GZ_ASSIMP_PRE_5_2_0) if(TARGET UNIT_AssimpLoader_TEST) target_compile_definitions(UNIT_AssimpLoader_TEST PRIVATE GZ_ASSIMP_PRE_5_2_0) endif() + if(${GzAssimp_VERSION} STRLESS "5.1.0") + message("Warning, assimp below 5.1.0 detected, setting compatibility mode") + target_compile_definitions(${graphics_target} PRIVATE GZ_ASSIMP_PRE_5_1_0) + if(TARGET UNIT_AssimpLoader_TEST) + target_compile_definitions(UNIT_AssimpLoader_TEST PRIVATE GZ_ASSIMP_PRE_5_1_0) + endif() + endif() endif() - if(USE_EXTERNAL_TINYXML2) # If we are using an external copy of tinyxml2, add its imported target diff --git a/test/data/box_transmission.glb b/test/data/box_transmission.glb new file mode 100644 index 0000000000000000000000000000000000000000..adc74d3881100ce746497fd1d1aeacf809f2eb54 GIT binary patch literal 2140 zcmb7ETT|0O6g~nfC|(fM*UUc0nxywWh)`juK;yKPaRwPk+il5An=qRcVLF}ZKhvM( zqvve1r43N(!tU8~`_9>Kw`q53y9)sDa~a_862RB0UG6g{2zbZ}eb(k47f$F4R$%+* z!uR|@Yh#MG-Qix73$66g)-IEYOv1`Con3MfxW0$MR>EKdo$;%Xdt?f}2fWD&$83ME za@uu5E?lP*oQA^jg036LEIsCQvFwS?f(ACh3P!XG5T(yNr;9dLl|G$$ev^~KI2mAu z()z5+gL9c0#TR>Z&M0Kuf<$Zjy?TdN-6r)IiXNZroOqbgHF_scvFkJ>T}Hw*M#D_y z4b#k+7V`R&`|Jc6Di#XG`KSqw3Y- z*1=XqKKC1PCdraS)vrT-z`b^eKNKsM(-|4;R!e>X@a98DwCQ7zOyx6pC5noNb0yfk z+4w}KE2pz`kU48-W6AgvnodsAQyq~I5ioJ;T#6o)rtK{PY?sRTH5_h6^Y;UL@~fEp z_L~zJ_w5x1`S02*8_8LF<)0-tqka?)>G8eNN#3zXisT?44dM%ik(35nM!95HLt#mM zsH``6bK_t=8A}j|qxy)~l5W*o)T@e-Ihd=4>@5~UK2>MjTP)@l4%X9f!X5I@r;$A_ zDSn8)xTH9;$0fymj_^sI;RKu%M>?RK)HSpXagwBQM!Lc!8}5}3@CaVQ2Cf_M6rRBo sSjTl8-osmX1FN{M!Yg Date: Thu, 14 Mar 2024 08:49:31 -0500 Subject: [PATCH 34/37] Various Bazel adjustments for linting (#582) Signed-off-by: Michael Carroll --- BUILD.bazel | 20 +++++++++++++++++++- av/BUILD.bazel | 12 ++++++++++++ events/BUILD.bazel | 12 ++++++++++++ geospatial/BUILD.bazel | 16 ++++++++++++++++ graphics/BUILD.bazel | 16 ++++++++++++++++ io/BUILD.bazel | 15 +++++++++++++++ profiler/BUILD.bazel | 13 +++++++++++++ test/BUILD.bazel | 13 +++++++++++++ testing/BUILD.bazel | 13 +++++++++++++ 9 files changed, 129 
insertions(+), 1 deletion(-) diff --git a/BUILD.bazel b/BUILD.bazel index 33ac2dac8..9ebf6f9c9 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -7,13 +7,24 @@ load( "gz_export_header", "gz_include_header", ) +load( + "@gz//bazel/lint:lint.bzl", + "add_lint_tests", +) +load("@rules_license//rules:license.bzl", "license") package( + default_applicable_licenses = [GZ_ROOT + "common:license"], default_visibility = GZ_VISIBILITY, features = GZ_FEATURES, ) -licenses(["notice"]) # Apache-2.0 +license( + name = "license", + package_name = "gz-common", +) + +licenses(["notice"]) exports_files(["LICENSE"]) @@ -67,6 +78,10 @@ cc_library( name = "common", srcs = sources + private_headers, hdrs = public_headers, + copts = [ + "-fexceptions", + "-Wno-unused-value", + ], includes = ["include"], deps = [ GZ_ROOT + "utils", @@ -86,6 +101,7 @@ test_sources = glob( [cc_test( name = src.replace("/", "_").replace(".cc", "").replace("src_", ""), srcs = [src], + copts = ["-fexceptions"], deps = [ ":common", GZ_ROOT + "common/testing", @@ -93,3 +109,5 @@ test_sources = glob( "@gtest//:gtest_main", ], ) for src in test_sources] + +add_lint_tests() diff --git a/av/BUILD.bazel b/av/BUILD.bazel index 93fb8a6a6..8f833d548 100644 --- a/av/BUILD.bazel +++ b/av/BUILD.bazel @@ -1,10 +1,20 @@ load( "@gz//bazel/skylark:build_defs.bzl", + "GZ_FEATURES", "GZ_ROOT", "GZ_VISIBILITY", "gz_export_header", "gz_include_header", ) +load( + "@gz//bazel/lint:lint.bzl", + "add_lint_tests", +) + +package( + default_applicable_licenses = [GZ_ROOT + "common:license"], + features = GZ_FEATURES, +) public_headers_no_gen = glob([ "include/gz/common/*.hh", @@ -68,3 +78,5 @@ cc_library( "@gtest//:gtest_main", ], ) for src in test_sources] + +add_lint_tests() diff --git a/events/BUILD.bazel b/events/BUILD.bazel index 4d6936b99..23af24c1d 100644 --- a/events/BUILD.bazel +++ b/events/BUILD.bazel @@ -1,10 +1,20 @@ load( "@gz//bazel/skylark:build_defs.bzl", + "GZ_FEATURES", "GZ_ROOT", "GZ_VISIBILITY", "gz_export_header", "gz_include_header", ) +load( + "@gz//bazel/lint:lint.bzl", + "add_lint_tests", +) + +package( + default_applicable_licenses = [GZ_ROOT + "common:license"], + features = GZ_FEATURES, +) public_headers_no_gen = glob([ "include/gz/common/*.hh", @@ -59,3 +69,5 @@ cc_library( "@gtest//:gtest_main", ], ) for src in test_sources] + +add_lint_tests() diff --git a/geospatial/BUILD.bazel b/geospatial/BUILD.bazel index b01923fa8..317adb242 100644 --- a/geospatial/BUILD.bazel +++ b/geospatial/BUILD.bazel @@ -1,10 +1,20 @@ load( "@gz//bazel/skylark:build_defs.bzl", + "GZ_FEATURES", "GZ_ROOT", "GZ_VISIBILITY", "gz_export_header", "gz_include_header", ) +load( + "@gz//bazel/lint:lint.bzl", + "add_lint_tests", +) + +package( + default_applicable_licenses = [GZ_ROOT + "common:license"], + features = GZ_FEATURES, +) public_headers_no_gen = glob([ "include/gz/common/*.hh", @@ -42,6 +52,10 @@ cc_library( name = "geospatial", srcs = sources, hdrs = public_headers, + copts = [ + "-Wno-unused-value", + "-fexceptions", + ], includes = ["include"], visibility = GZ_VISIBILITY, deps = [ @@ -66,3 +80,5 @@ cc_library( "@gtest//:gtest_main", ], ) for src in test_sources] + +add_lint_tests() diff --git a/graphics/BUILD.bazel b/graphics/BUILD.bazel index 6d7b8822a..a22858100 100644 --- a/graphics/BUILD.bazel +++ b/graphics/BUILD.bazel @@ -1,10 +1,20 @@ load( "@gz//bazel/skylark:build_defs.bzl", + "GZ_FEATURES", "GZ_ROOT", "GZ_VISIBILITY", "gz_export_header", "gz_include_header", ) +load( + "@gz//bazel/lint:lint.bzl", + "add_lint_tests", +) + +package( + 
default_applicable_licenses = [GZ_ROOT + "common:license"], + features = GZ_FEATURES, +) public_headers_no_gen = glob([ "include/gz/common/*.hh", @@ -43,6 +53,10 @@ cc_library( name = "graphics", srcs = sources, hdrs = public_headers, + copts = [ + "-Wno-implicit-fallthrough", + "-Wno-unused-value", + ], includes = ["include"], visibility = GZ_VISIBILITY, deps = [ @@ -70,3 +84,5 @@ cc_library( "@gtest//:gtest_main", ], ) for src in test_sources] + +add_lint_tests() diff --git a/io/BUILD.bazel b/io/BUILD.bazel index a3371a451..5a99ada61 100644 --- a/io/BUILD.bazel +++ b/io/BUILD.bazel @@ -1,10 +1,20 @@ load( "@gz//bazel/skylark:build_defs.bzl", + "GZ_FEATURES", "GZ_ROOT", "GZ_VISIBILITY", "gz_export_header", "gz_include_header", ) +load( + "@gz//bazel/lint:lint.bzl", + "add_lint_tests", +) + +package( + default_applicable_licenses = [GZ_ROOT + "common:license"], + features = GZ_FEATURES, +) public_headers_no_gen = glob([ "include/gz/common/*.hh", @@ -42,6 +52,9 @@ cc_library( name = "io", srcs = sources, hdrs = public_headers, + copts = [ + "-fexceptions", + ], includes = ["include"], visibility = GZ_VISIBILITY, deps = [ @@ -63,3 +76,5 @@ cc_library( "@gtest//:gtest_main", ], ) for src in test_sources] + +add_lint_tests() diff --git a/profiler/BUILD.bazel b/profiler/BUILD.bazel index 8d9a6dd21..1bbd351e9 100644 --- a/profiler/BUILD.bazel +++ b/profiler/BUILD.bazel @@ -1,11 +1,21 @@ load( "@gz//bazel/skylark:build_defs.bzl", + "GZ_FEATURES", "GZ_ROOT", "GZ_VISIBILITY", "cmake_configure_file", "gz_export_header", "gz_include_header", ) +load( + "@gz//bazel/lint:lint.bzl", + "add_lint_tests", +) + +package( + default_applicable_licenses = [GZ_ROOT + "common:license"], + features = GZ_FEATURES, +) # Configuration for UNIX RMT_ENABLED = 1 @@ -97,6 +107,7 @@ cc_library( cc_test( name = "Profiler_Disabled_TEST", srcs = ["src/Profiler_Disabled_TEST.cc"], + copts = ["-Wno-macro-redefined"], defines = [ "GZ_PROFILER_ENABLE=0", "GZ_PROFILER_REMOTERY=0", @@ -117,3 +128,5 @@ cc_test( "@gtest//:gtest_main", ], ) + +add_lint_tests() diff --git a/test/BUILD.bazel b/test/BUILD.bazel index d62c70a6b..b0c103bbf 100644 --- a/test/BUILD.bazel +++ b/test/BUILD.bazel @@ -1,7 +1,17 @@ load( "@gz//bazel/skylark:build_defs.bzl", + "GZ_FEATURES", "GZ_ROOT", ) +load( + "@gz//bazel/lint:lint.bzl", + "add_lint_tests", +) + +package( + default_applicable_licenses = [GZ_ROOT + "common:license"], + features = GZ_FEATURES, +) cc_test( name = "INTEGRATION_console", @@ -35,8 +45,11 @@ cc_test( GZ_ROOT + "common", GZ_ROOT + "common/graphics", GZ_ROOT + "common/testing", + GZ_ROOT + "math", "@gtest//:gtest_main", ], ) exports_files(["data"]) + +add_lint_tests() diff --git a/testing/BUILD.bazel b/testing/BUILD.bazel index ac9111cd3..be0dd2c69 100644 --- a/testing/BUILD.bazel +++ b/testing/BUILD.bazel @@ -1,8 +1,18 @@ load( "@gz//bazel/skylark:build_defs.bzl", + "GZ_FEATURES", "GZ_ROOT", "gz_export_header", ) +load( + "@gz//bazel/lint:lint.bzl", + "add_lint_tests", +) + +package( + default_applicable_licenses = [GZ_ROOT + "common:license"], + features = GZ_FEATURES, +) gz_export_header( name = "include/gz/common/testing/Export.hh", @@ -25,6 +35,7 @@ cc_library( "src/Utils.cc", ], hdrs = public_headers, + copts = ["-fexceptions"], includes = ["include"], visibility = ["//visibility:public"], deps = [ @@ -67,3 +78,5 @@ cc_test( "@gtest//:gtest_main", ], ) + +add_lint_tests() From c4bfb3b9d03f96b65418076c40bc2c48446c7e7b Mon Sep 17 00:00:00 2001 From: "Addisu Z. 
Taddese" Date: Thu, 14 Mar 2024 17:47:11 -0500 Subject: [PATCH 35/37] Prepare for 5.5.1 (#586) Signed-off-by: Addisu Z. Taddese --- CMakeLists.txt | 2 +- Changelog.md | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 199b4031b..dbf83957f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.10.2 FATAL_ERROR) #============================================================================ # Initialize the project #============================================================================ -project(gz-common5 VERSION 5.5.0) +project(gz-common5 VERSION 5.5.1) set(GZ_COMMON_VER ${PROJECT_VERSION_MAJOR}) #============================================================================ diff --git a/Changelog.md b/Changelog.md index 0234f1e70..e6a90cae7 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,5 +1,16 @@ ## Gazebo Common 5.x +## Gazebo Common 5.5.1 (2024-03-14) + +1. Various Bazel adjustments for linting + * [Pull request #582](https://github.com/gazebosim/gz-common/pull/582) + +1. Extend AssimpLoader to parse material transmission factor + * [Pull request #577](https://github.com/gazebosim/gz-common/pull/577) + +1. Fix noise issue that appears in certain image textures + * [Pull request #578](https://github.com/gazebosim/gz-common/pull/578) + ## Gazebo Common 5.5.0 (2024-02-26) 1. Be louder when graphics is missing for geospatial From e213c2bf91b0d55929b31c6a3b5232a74957c475 Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Thu, 14 Mar 2024 15:06:09 -0700 Subject: [PATCH 36/37] Adds new function in MeshManager for performing convex decomposition (#585) Signed-off-by: Ian Chen --- graphics/include/gz/common/MeshManager.hh | 15 + graphics/src/MeshManager.cc | 115 +- graphics/src/MeshManager_TEST.cc | 48 + graphics/src/VHACD/LICENSE | 29 + graphics/src/VHACD/README.md | 3 + graphics/src/VHACD/VHACD.h | 8364 +++++++++++++++++++++ 6 files changed, 8572 insertions(+), 2 deletions(-) create mode 100644 graphics/src/VHACD/LICENSE create mode 100644 graphics/src/VHACD/README.md create mode 100644 graphics/src/VHACD/VHACD.h diff --git a/graphics/include/gz/common/MeshManager.hh b/graphics/include/gz/common/MeshManager.hh index 91a164a0f..61b19f3e3 100644 --- a/graphics/include/gz/common/MeshManager.hh +++ b/graphics/include/gz/common/MeshManager.hh @@ -280,6 +280,21 @@ namespace gz const Mesh *_m2, const int _operation, const gz::math::Pose3d &_offset = gz::math::Pose3d::Zero); + /// \brief Perform convex decomposition on a submesh. + /// The submesh is decomposed into multiple convex submeshes. The output + /// submeshes contain vertices and indices but texture coordinates + /// are not preserved. + /// \param[in] _subMesh Input submesh to decompose. + /// \param[in] _maxConvexHulls Maximum number of convex hull submeshes to + /// produce. + /// \param[in] _voxelResolution Voxel resolution to use. Higher value + /// produces more accurate shapes. + /// \return A vector of decomposed submeshes. + public: std::vector ConvexDecomposition( + const common::SubMesh &_subMesh, + std::size_t _maxConvexHulls = 16u, + std::size_t _voxelResolution = 200000u); + /// \brief Converts a vector of polylines into a table of vertices and /// a list of edges (each made of 2 points from the table of vertices. 
/// \param[in] _polys the polylines diff --git a/graphics/src/MeshManager.cc b/graphics/src/MeshManager.cc index 321d24d4b..f1c42ea41 100644 --- a/graphics/src/MeshManager.cc +++ b/graphics/src/MeshManager.cc @@ -16,11 +16,27 @@ */ #include -#include + +#include +#include #include +#include #include #include -#include +#include + +// Suppress warnings for VHACD +#ifndef _WIN32 +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wfloat-equal" +#pragma GCC diagnostic ignored "-Wswitch-default" +#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif +#define ENABLE_VHACD_IMPLEMENTATION 1 +#include "VHACD/VHACD.h" +#ifndef _WIN32 +#pragma GCC diagnostic pop +#endif #ifndef _WIN32 #include "gz/common/GTSMeshUtils.hh" @@ -35,6 +51,7 @@ #include "gz/common/ColladaExporter.hh" #include "gz/common/OBJLoader.hh" #include "gz/common/STLLoader.hh" +#include "gz/common/Timer.hh" #include "gz/common/Util.hh" #include "gz/common/config.hh" @@ -1649,3 +1666,97 @@ void MeshManager::SetAssimpEnvs() this->dataPtr->forceAssimp = true; } } + +////////////////////////////////////////////////// +std::vector +MeshManager::ConvexDecomposition(const SubMesh &_subMesh, + std::size_t _maxConvexHulls, + std::size_t _voxelResolution) +{ + std::vector decomposed; + + auto vertexCount = _subMesh.VertexCount(); + auto indexCount = _subMesh.IndexCount(); + auto triangleCount = indexCount / 3u; + + std::vector points; + points.resize(vertexCount * 3u); + for (std::size_t i = 0; i < vertexCount; ++i) + { + std::size_t idx = i * 3u; + points[idx] = _subMesh.Vertex(i).X(); + points[idx + 1] = _subMesh.Vertex(i).Y(); + points[idx + 2] = _subMesh.Vertex(i).Z(); + } + + std::vector indices; + indices.resize(indexCount); + for (std::size_t i = 0; i < indexCount; ++i) + { + indices[i] = _subMesh.Index(i); + } + + VHACD::IVHACD *iface = VHACD::CreateVHACD_ASYNC(); + VHACD::IVHACD::Parameters parameters; + parameters.m_maxConvexHulls = _maxConvexHulls; + parameters.m_resolution = _voxelResolution; + parameters.m_asyncACD = true; + iface->Compute(points.data(), vertexCount, indices.data(), triangleCount, + parameters); + + common::Timer t; + t.Start(); + auto timeout = std::chrono::seconds(300); + while (!iface->IsReady()) + { + if (t.ElapsedTime() > timeout) + { + iface->Cancel(); + gzwarn << "Convex decomposition timed out. Process took more than " + << timeout.count() << " seconds. " << std::endl; + t.Stop(); + break; + } + std::this_thread::sleep_for(std::chrono::nanoseconds(10000)); + } + + if (!iface->GetNConvexHulls()) + { + gzwarn << "No convex hulls are generated " + << (!_subMesh.Name().empty() ? 
"from " : "") + << _subMesh.Name(); + return decomposed; + } + + for (std::size_t n = 0; n < iface->GetNConvexHulls(); ++n) + { + VHACD::IVHACD::ConvexHull ch; + iface->GetConvexHull(n, ch); + + SubMesh convexMesh; + for (std::size_t i = 0u; i < ch.m_points.size(); ++i) + { + const VHACD::Vertex &p = ch.m_points[i]; + gz::math::Vector3d vertex(p.mX, p.mY, p.mZ); + convexMesh.AddVertex(vertex); + + // add dummy normal - this will be overriden by the + // RecalculateNormals call + convexMesh.AddNormal(vertex); + } + + for (std::size_t i = 0u; i < ch.m_triangles.size(); ++i) + { + const VHACD::Triangle &tri = ch.m_triangles[i]; + convexMesh.AddIndex(tri.mI0); + convexMesh.AddIndex(tri.mI1); + convexMesh.AddIndex(tri.mI2); + } + convexMesh.RecalculateNormals(); + decomposed.push_back(convexMesh); + } + + iface->Release(); + + return decomposed; +} diff --git a/graphics/src/MeshManager_TEST.cc b/graphics/src/MeshManager_TEST.cc index fd6ff47bc..568e06611 100644 --- a/graphics/src/MeshManager_TEST.cc +++ b/graphics/src/MeshManager_TEST.cc @@ -23,6 +23,7 @@ #include "gz/common/config.hh" #include "gz/common/testing/AutoLogFixture.hh" +#include "gz/common/testing/TestPaths.hh" using namespace gz; @@ -289,4 +290,51 @@ TEST_F(MeshManager, Remove) mgr->RemoveAll(); EXPECT_FALSE(mgr->HasMesh("sphere")); } + +///////////////////////////////////////////////// +TEST_F(MeshManager, ConvexDecomposition) +{ + auto mgr = common::MeshManager::Instance(); + const common::Mesh *boxMesh = mgr->Load( + common::testing::TestFile("data", "box.dae")); + + ASSERT_NE(nullptr, boxMesh); + EXPECT_EQ(1u, boxMesh->SubMeshCount()); + + std::size_t maxConvexHulls = 4; + std::size_t resolution = 1000; + auto submesh = boxMesh->SubMeshByIndex(0u).lock(); + auto decomposed = std::move(mgr->ConvexDecomposition( + *(submesh.get()), maxConvexHulls, resolution)); + + // Decomposing a box should just produce a box + EXPECT_EQ(1u, decomposed.size()); + common::SubMesh &boxSubmesh = decomposed[0]; + // A convex hull of a box should contain exactly 8 vertices + EXPECT_EQ(8u, boxSubmesh.VertexCount()); + EXPECT_EQ(8u, boxSubmesh.NormalCount()); + EXPECT_EQ(36u, boxSubmesh.IndexCount()); + + const common::Mesh *drillMesh = mgr->Load( + common::testing::TestFile("data", "cordless_drill", + "meshes", "cordless_drill.dae")); + ASSERT_NE(nullptr, drillMesh); + EXPECT_EQ(1u, drillMesh->SubMeshCount()); + submesh = drillMesh->SubMeshByIndex(0u).lock(); + decomposed = std::move(mgr->ConvexDecomposition( + *(submesh.get()), maxConvexHulls, resolution)); + + // A drill should be decomposed into multiple submeshes + EXPECT_LT(1u, decomposed.size()); + EXPECT_GE(maxConvexHulls, decomposed.size()); + // Check submeshes are not empty + for (const auto &d : decomposed) + { + const common::SubMesh &drillSubmesh = d; + EXPECT_LT(3u, drillSubmesh.VertexCount()); + EXPECT_EQ(drillSubmesh.VertexCount(), drillSubmesh.NormalCount()); + EXPECT_LT(3u, drillSubmesh.IndexCount()); + } +} + #endif diff --git a/graphics/src/VHACD/LICENSE b/graphics/src/VHACD/LICENSE new file mode 100644 index 000000000..efc7f6439 --- /dev/null +++ b/graphics/src/VHACD/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2011, Khaled Mamou (kmamou at gmail dot com) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/graphics/src/VHACD/README.md b/graphics/src/VHACD/README.md new file mode 100644 index 000000000..b200bc3a8 --- /dev/null +++ b/graphics/src/VHACD/README.md @@ -0,0 +1,3 @@ +V-HACD: https://github.com/kmammou/v-hacd +commit: ab01cba71cc563b73bd3f0d574d346b2b797ea4b + diff --git a/graphics/src/VHACD/VHACD.h b/graphics/src/VHACD/VHACD.h new file mode 100644 index 000000000..6daec08df --- /dev/null +++ b/graphics/src/VHACD/VHACD.h @@ -0,0 +1,8364 @@ +/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) + All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + + 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#pragma once +#ifndef VHACD_H +# define VHACD_H + +// Please view this slide deck which describes usage and how the algorithm works. +// https://docs.google.com/presentation/d/1OZ4mtZYrGEC8qffqb8F7Le2xzufiqvaPpRbLHKKgTIM/edit?usp=sharing + +// VHACD is now a header only library. 
+// In just *one* of your CPP files *before* you include 'VHACD.h' you must declare +// #define ENABLE_VHACD_IMPLEMENTATION 1 +// This will compile the implementation code into your project. If you don't +// have this define, you will get link errors since the implementation code will +// not be present. If you define it more than once in your code base, you will get +// link errors due to a duplicate implementation. This is the same pattern used by +// ImGui and StbLib and other popular open source libraries. + +# define VHACD_VERSION_MAJOR 4 +# define VHACD_VERSION_MINOR 1 + +// Changes for version 4.1 +// +// Various minor tweaks mostly to the test application and some default values. + +// Changes for version 4.0 +// +// * The code has been significantly refactored to be cleaner and easier to maintain +// * All OpenCL related code removed +// * All Bullet code removed +// * All SIMD code removed +// * Old plane splitting code removed +// +// * The code is now delivered as a single header file 'VHACD.h' which has both the API +// * declaration as well as the implementation. Simply add '#define ENABLE_VHACD_IMPLEMENTATION 1' +// * to any CPP in your application prior to including 'VHACD.h'. Only do this in one CPP though. +// * If you do not have this define once, you will get link errors since the implementation code +// * will not be compiled in. If you have this define more than once, you are likely to get +// * duplicate symbol link errors. +// +// * Since the library is now delivered as a single header file, we do not provide binaries +// * or build scripts as these are not needed. +// +// * The old DebugView and test code has all been removed and replaced with a much smaller and +// * simpler test console application with some test meshes to work with. +// +// * The convex hull generation code has changed. The previous version came from Bullet. +// * However, the new version is courtesy of Julio Jerez, the author of the Newton +// * physics engine. His new version is faster and more numerically stable. +// +// * The code can now detect if the input mesh is, itself, already a convex object and +// * can early out. +// +// * Significant performance improvements have been made to the code and it is now much +// * faster, stable, and is easier to tune than previous versions. +// +// * A bug was fixed with the shrink wrapping code (project hull vertices) that could +// * sometime produce artifacts in the results. The new version uses a 'closest point' +// * algorithm that is more reliable. +// +// * You can now select which 'fill mode' to use. For perfectly closed meshes, the default +// * behavior using a flood fill generally works fine. However, some meshes have small +// * holes in them and therefore the flood fill will fail, treating the mesh as being +// * hollow. In these cases, you can use the 'raycast' fill option to determine which +// * parts of the voxelized mesh are 'inside' versus being 'outside'. Finally, there +// * are some rare instances where a user might actually want the mesh to be treated as +// * hollow, in which case you can pass in 'surface' only. +// * +// * A new optional virtual interface called 'IUserProfiler' was provided. +// * This allows the user to provide an optional profiling callback interface to assist in +// * diagnosing performance issues. This change was made by Danny Couture at Epic for the UE4 integration. +// * Some profiling macros were also declared in support of this feature. 
+// * +// * Another new optional virtual interface called 'IUserTaskRunner' was provided. +// * This interface is used to run logical 'tasks' in a background thread. If none is provided +// * then a default implementation using std::thread will be executed. +// * This change was made by Danny Couture at Epic to speed up the voxelization step. +// * + + + +// The history of V-HACD: +// +// The initial version was written by John W. Ratcliff and was called 'ACD' +// This version did not perform CSG operations on the source mesh, so if you +// recursed too deeply it would produce hollow results. +// +// The next version was written by Khaled Mamou and was called 'HACD' +// In this version Khaled tried to perform a CSG operation on the source +// mesh to produce more robust results. However, Khaled learned that the +// CSG library he was using had licensing issues so he started work on the +// next version. +// +// The next version was called 'V-HACD' because Khaled made the observation +// that plane splitting would be far easier to implement working in voxel space. +// +// V-HACD has been integrated into UE4, Blender, and a number of other projects. +// This new release, version4, is a significant refactor of the code to fix +// some bugs, improve performance, and to make the codebase easier to maintain +// going forward. + +#include +#include + +#include +#include +#include +#include + +namespace VHACD { + +struct Vertex +{ + double mX; + double mY; + double mZ; + + Vertex() = default; + Vertex(double x, double y, double z) : mX(x), mY(y), mZ(z) {} + + const double& operator[](size_t idx) const + { + switch(idx) + { + case 0: return mX; + case 1: return mY; + case 2: return mZ; + }; + return mX; + } +}; + +struct Triangle +{ + uint32_t mI0; + uint32_t mI1; + uint32_t mI2; + + Triangle() = default; + Triangle(uint32_t i0, uint32_t i1, uint32_t i2) : mI0(i0), mI1(i1), mI2(i2) {} +}; + +template +class Vector3 +{ +public: + /* + * Getters + */ + T& operator[](size_t i); + const T& operator[](size_t i) const; + T& GetX(); + T& GetY(); + T& GetZ(); + const T& GetX() const; + const T& GetY() const; + const T& GetZ() const; + + /* + * Normalize and norming + */ + T Normalize(); + Vector3 Normalized(); + T GetNorm() const; + T GetNormSquared() const; + int LongestAxis() const; + + /* + * Vector-vector operations + */ + Vector3& operator=(const Vector3& rhs); + Vector3& operator+=(const Vector3& rhs); + Vector3& operator-=(const Vector3& rhs); + + Vector3 CWiseMul(const Vector3& rhs) const; + Vector3 Cross(const Vector3& rhs) const; + T Dot(const Vector3& rhs) const; + Vector3 operator+(const Vector3& rhs) const; + Vector3 operator-(const Vector3& rhs) const; + + /* + * Vector-scalar operations + */ + Vector3& operator-=(T a); + Vector3& operator+=(T a); + Vector3& operator/=(T a); + Vector3& operator*=(T a); + + Vector3 operator*(T rhs) const; + Vector3 operator/(T rhs) const; + + /* + * Unary operations + */ + Vector3 operator-() const; + + /* + * Comparison operators + */ + bool operator<(const Vector3& rhs) const; + bool operator>(const Vector3& rhs) const; + + /* + * Returns true if all elements of *this are greater than or equal to all elements of rhs, coefficient wise + * LE is less than or equal + */ + bool CWiseAllGE(const Vector3& rhs) const; + bool CWiseAllLE(const Vector3& rhs) const; + + Vector3 CWiseMin(const Vector3& rhs) const; + Vector3 CWiseMax(const Vector3& rhs) const; + T MinCoeff() const; + T MaxCoeff() const; + + T MinCoeff(uint32_t& idx) const; + T MaxCoeff(uint32_t& idx) 
const; + + /* + * Constructors + */ + Vector3() = default; + Vector3(T a); + Vector3(T x, T y, T z); + Vector3(const Vector3& rhs); + ~Vector3() = default; + + template + Vector3(const Vector3& rhs); + + Vector3(const VHACD::Vertex&); + Vector3(const VHACD::Triangle&); + + operator VHACD::Vertex() const; + +private: + std::array m_data{ T(0.0) }; +}; + +typedef VHACD::Vector3 Vect3; + +struct BoundsAABB +{ + BoundsAABB() = default; + BoundsAABB(const std::vector& points); + BoundsAABB(const Vect3& min, + const Vect3& max); + + BoundsAABB Union(const BoundsAABB& b); + + bool Intersects(const BoundsAABB& b) const; + + double SurfaceArea() const; + double Volume() const; + + BoundsAABB Inflate(double ratio) const; + + VHACD::Vect3 ClosestPoint(const VHACD::Vect3& p) const; + + VHACD::Vect3& GetMin(); + VHACD::Vect3& GetMax(); + const VHACD::Vect3& GetMin() const; + const VHACD::Vect3& GetMax() const; + + VHACD::Vect3 GetSize() const; + VHACD::Vect3 GetCenter() const; + + VHACD::Vect3 m_min{ double(0.0) }; + VHACD::Vect3 m_max{ double(0.0) }; +}; + +/** +* This enumeration determines how the voxels as filled to create a solid +* object. The default should be 'FLOOD_FILL' which generally works fine +* for closed meshes. However, if the mesh is not watertight, then using +* RAYCAST_FILL may be preferable as it will determine if a voxel is part +* of the interior of the source mesh by raycasting around it. +* +* Finally, there are some cases where you might actually want a convex +* decomposition to treat the source mesh as being hollow. If that is the +* case you can pass in 'SURFACE_ONLY' and then the convex decomposition +* will converge only onto the 'skin' of the surface mesh. +*/ +enum class FillMode +{ + FLOOD_FILL, // This is the default behavior, after the voxelization step it uses a flood fill to determine 'inside' + // from 'outside'. However, meshes with holes can fail and create hollow results. + SURFACE_ONLY, // Only consider the 'surface', will create 'skins' with hollow centers. + RAYCAST_FILL, // Uses raycasting to determine inside from outside. +}; + +class IVHACD +{ +public: + /** + * This optional pure virtual interface is used to notify the caller of the progress + * of convex decomposition as well as a signal when it is complete when running in + * a background thread + */ + class IUserCallback + { + public: + virtual ~IUserCallback(){}; + + /** + * Notifies the application of the current state of the convex decomposition operation + * + * @param overallProgress : Total progress from 0-100% + * @param stageProgress : Progress of the current stage 0-100% + * @param stage : A text description of the current stage we are in + * @param operation : A text description of what operation is currently being performed. + */ + virtual void Update(const double overallProgress, + const double stageProgress, + const char* const stage, + const char* operation) = 0; + + // This is an optional user callback which is only called when running V-HACD asynchronously. + // This is a callback performed to notify the user that the + // convex decomposition background process is completed. This call back will occur from + // a different thread so the user should take that into account. 
+ virtual void NotifyVHACDComplete() + { + } + }; + + /** + * Optional user provided pure virtual interface to be notified of warning or informational messages + */ + class IUserLogger + { + public: + virtual ~IUserLogger(){}; + virtual void Log(const char* const msg) = 0; + }; + + /** + * An optional user provided pure virtual interface to perform a background task. + * This was added by Danny Couture at Epic as they wanted to use their own + * threading system instead of the standard library version which is the default. + */ + class IUserTaskRunner + { + public: + virtual ~IUserTaskRunner(){}; + virtual void* StartTask(std::function func) = 0; + virtual void JoinTask(void* Task) = 0; + }; + + /** + * A simple class that represents a convex hull as a triangle mesh with + * double precision vertices. Polygons are not currently provided. + */ + class ConvexHull + { + public: + std::vector m_points; + std::vector m_triangles; + + double m_volume{ 0 }; // The volume of the convex hull + VHACD::Vect3 m_center{ 0, 0, 0 }; // The centroid of the convex hull + uint32_t m_meshId{ 0 }; // A unique id for this convex hull + VHACD::Vect3 mBmin; // Bounding box minimum of the AABB + VHACD::Vect3 mBmax; // Bounding box maximum of the AABB + }; + + /** + * This class provides the parameters controlling the convex decomposition operation + */ + class Parameters + { + public: + IUserCallback* m_callback{nullptr}; // Optional user provided callback interface for progress + IUserLogger* m_logger{nullptr}; // Optional user provided callback interface for log messages + IUserTaskRunner* m_taskRunner{nullptr}; // Optional user provided interface for creating tasks + uint32_t m_maxConvexHulls{ 64 }; // The maximum number of convex hulls to produce + uint32_t m_resolution{ 400000 }; // The voxel resolution to use + double m_minimumVolumePercentErrorAllowed{ 1 }; // if the voxels are within 1% of the volume of the hull, we consider this a close enough approximation + uint32_t m_maxRecursionDepth{ 10 }; // The maximum recursion depth + bool m_shrinkWrap{true}; // Whether or not to shrinkwrap the voxel positions to the source mesh on output + FillMode m_fillMode{ FillMode::FLOOD_FILL }; // How to fill the interior of the voxelized mesh + uint32_t m_maxNumVerticesPerCH{ 64 }; // The maximum number of vertices allowed in any output convex hull + bool m_asyncACD{ true }; // Whether or not to run asynchronously, taking advantage of additional cores + uint32_t m_minEdgeLength{ 2 }; // Once a voxel patch has an edge length of less than 4 on all 3 sides, we don't keep recursing + bool m_findBestPlane{ false }; // Whether or not to attempt to split planes along the best location. Experimental feature. False by default. + }; + + /** + * Will cause the convex decomposition operation to be canceled early. No results will be produced but the background operation will end as soon as it can. + */ + virtual void Cancel() = 0; + + /** + * Compute a convex decomposition of a triangle mesh using float vertices and the provided user parameters. + * + * @param points : The vertices of the source mesh as floats in the form of X1,Y1,Z1, X2,Y2,Z2,.. etc. + * @param countPoints : The number of vertices in the source mesh. + * @param triangles : The indices of triangles in the source mesh in the form of I1,I2,I3, .... 
+ * @param countTriangles : The number of triangles in the source mesh + * @param params : The convex decomposition parameters to apply + * @return : Returns true if the convex decomposition operation can be started + */ + virtual bool Compute(const float* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t countTriangles, + const Parameters& params) = 0; + + /** + * Compute a convex decomposition of a triangle mesh using double vertices and the provided user parameters. + * + * @param points : The vertices of the source mesh as floats in the form of X1,Y1,Z1, X2,Y2,Z2,.. etc. + * @param countPoints : The number of vertices in the source mesh. + * @param triangles : The indices of triangles in the source mesh in the form of I1,I2,I3, .... + * @param countTriangles : The number of triangles in the source mesh + * @param params : The convex decomposition parameters to apply + * @return : Returns true if the convex decomposition operation can be started + */ + virtual bool Compute(const double* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t countTriangles, + const Parameters& params) = 0; + + /** + * Returns the number of convex hulls that were produced. + * + * @return : Returns the number of convex hulls produced, or zero if it failed or was canceled + */ + virtual uint32_t GetNConvexHulls() const = 0; + + /** + * Retrieves one of the convex hulls in the solution set + * + * @param index : Which convex hull to retrieve + * @param ch : The convex hull descriptor to return + * @return : Returns true if the convex hull exists and could be retrieved + */ + virtual bool GetConvexHull(const uint32_t index, + ConvexHull& ch) const = 0; + + /** + * Releases any memory allocated by the V-HACD class + */ + virtual void Clean() = 0; // release internally allocated memory + + /** + * Releases this instance of the V-HACD class + */ + virtual void Release() = 0; // release IVHACD + + // Will compute the center of mass of the convex hull decomposition results and return it + // in 'centerOfMass'. Returns false if the center of mass could not be computed. + virtual bool ComputeCenterOfMass(double centerOfMass[3]) const = 0; + + // In synchronous mode (non-multi-threaded) the state is always 'ready' + // In asynchronous mode, this returns true if the background thread is not still actively computing + // a new solution. In an asynchronous config the 'IsReady' call will report any update or log + // messages in the caller's current thread. + virtual bool IsReady() const + { + return true; + } + + /** + * At the request of LegionFu : out_look@foxmail.com + * This method will return which convex hull is closest to the source position. + * You can use this method to figure out, for example, which vertices in the original + * source mesh are best associated with which convex hull. + * + * @param pos : The input 3d position to test against + * + * @return : Returns which convex hull this position is closest to. 
+ */ + virtual uint32_t findNearestConvexHull(const double pos[3], + double& distanceToHull) = 0; + +protected: + virtual ~IVHACD() + { + } +}; +/* + * Out of line definitions + */ + + template + T clamp(const T& v, const T& lo, const T& hi) + { + if (v < lo) + { + return lo; + } + if (v > hi) + { + return hi; + } + return v ; + } + +/* + * Getters + */ + template + inline T& Vector3::operator[](size_t i) + { + return m_data[i]; + } + + template + inline const T& Vector3::operator[](size_t i) const + { + return m_data[i]; + } + + template + inline T& Vector3::GetX() + { + return m_data[0]; + } + + template + inline T& Vector3::GetY() + { + return m_data[1]; + } + + template + inline T& Vector3::GetZ() + { + return m_data[2]; + } + + template + inline const T& Vector3::GetX() const + { + return m_data[0]; + } + + template + inline const T& Vector3::GetY() const + { + return m_data[1]; + } + + template + inline const T& Vector3::GetZ() const + { + return m_data[2]; + } + +/* + * Normalize and norming + */ + template + inline T Vector3::Normalize() + { + T n = GetNorm(); + if (n != T(0.0)) (*this) /= n; + return n; + } + + template + inline Vector3 Vector3::Normalized() + { + Vector3 ret = *this; + T n = GetNorm(); + if (n != T(0.0)) ret /= n; + return ret; + } + + template + inline T Vector3::GetNorm() const + { + return std::sqrt(GetNormSquared()); + } + + template + inline T Vector3::GetNormSquared() const + { + return this->Dot(*this); + } + + template + inline int Vector3::LongestAxis() const + { + auto it = std::max_element(m_data.begin(), m_data.end()); + return int(std::distance(m_data.begin(), it)); + } + +/* + * Vector-vector operations + */ + template + inline Vector3& Vector3::operator=(const Vector3& rhs) + { + GetX() = rhs.GetX(); + GetY() = rhs.GetY(); + GetZ() = rhs.GetZ(); + return *this; + } + + template + inline Vector3& Vector3::operator+=(const Vector3& rhs) + { + GetX() += rhs.GetX(); + GetY() += rhs.GetY(); + GetZ() += rhs.GetZ(); + return *this; + } + + template + inline Vector3& Vector3::operator-=(const Vector3& rhs) + { + GetX() -= rhs.GetX(); + GetY() -= rhs.GetY(); + GetZ() -= rhs.GetZ(); + return *this; + } + + template + inline Vector3 Vector3::CWiseMul(const Vector3& rhs) const + { + return Vector3(GetX() * rhs.GetX(), + GetY() * rhs.GetY(), + GetZ() * rhs.GetZ()); + } + + template + inline Vector3 Vector3::Cross(const Vector3& rhs) const + { + return Vector3(GetY() * rhs.GetZ() - GetZ() * rhs.GetY(), + GetZ() * rhs.GetX() - GetX() * rhs.GetZ(), + GetX() * rhs.GetY() - GetY() * rhs.GetX()); + } + + template + inline T Vector3::Dot(const Vector3& rhs) const + { + return GetX() * rhs.GetX() + + GetY() * rhs.GetY() + + GetZ() * rhs.GetZ(); + } + + template + inline Vector3 Vector3::operator+(const Vector3& rhs) const + { + return Vector3(GetX() + rhs.GetX(), + GetY() + rhs.GetY(), + GetZ() + rhs.GetZ()); + } + + template + inline Vector3 Vector3::operator-(const Vector3& rhs) const + { + return Vector3(GetX() - rhs.GetX(), + GetY() - rhs.GetY(), + GetZ() - rhs.GetZ()); + } + + template + inline Vector3 operator*(T lhs, const Vector3& rhs) + { + return Vector3(lhs * rhs.GetX(), + lhs * rhs.GetY(), + lhs * rhs.GetZ()); + } + +/* + * Vector-scalar operations + */ + template + inline Vector3& Vector3::operator-=(T a) + { + GetX() -= a; + GetY() -= a; + GetZ() -= a; + return *this; + } + + template + inline Vector3& Vector3::operator+=(T a) + { + GetX() += a; + GetY() += a; + GetZ() += a; + return *this; + } + + template + inline Vector3& Vector3::operator/=(T a) + { + 
GetX() /= a; + GetY() /= a; + GetZ() /= a; + return *this; + } + + template + inline Vector3& Vector3::operator*=(T a) + { + GetX() *= a; + GetY() *= a; + GetZ() *= a; + return *this; + } + + template + inline Vector3 Vector3::operator*(T rhs) const + { + return Vector3(GetX() * rhs, + GetY() * rhs, + GetZ() * rhs); + } + + template + inline Vector3 Vector3::operator/(T rhs) const + { + return Vector3(GetX() / rhs, + GetY() / rhs, + GetZ() / rhs); + } + +/* + * Unary operations + */ + template + inline Vector3 Vector3::operator-() const + { + return Vector3(-GetX(), + -GetY(), + -GetZ()); + } + +/* + * Comparison operators + */ + template + inline bool Vector3::operator<(const Vector3& rhs) const + { + if (GetX() == rhs.GetX()) + { + if (GetY() == rhs.GetY()) + { + return (GetZ() < rhs.GetZ()); + } + return (GetY() < rhs.GetY()); + } + return (GetX() < rhs.GetX()); + } + + template + inline bool Vector3::operator>(const Vector3& rhs) const + { + if (GetX() == rhs.GetX()) + { + if (GetY() == rhs.GetY()) + { + return (GetZ() > rhs.GetZ()); + } + return (GetY() > rhs.GetY()); + } + return (GetX() > rhs.GetZ()); + } + + template + inline bool Vector3::CWiseAllGE(const Vector3& rhs) const + { + return GetX() >= rhs.GetX() + && GetY() >= rhs.GetY() + && GetZ() >= rhs.GetZ(); + } + + template + inline bool Vector3::CWiseAllLE(const Vector3& rhs) const + { + return GetX() <= rhs.GetX() + && GetY() <= rhs.GetY() + && GetZ() <= rhs.GetZ(); + } + + template + inline Vector3 Vector3::CWiseMin(const Vector3& rhs) const + { + return Vector3(std::min(GetX(), rhs.GetX()), + std::min(GetY(), rhs.GetY()), + std::min(GetZ(), rhs.GetZ())); + } + + template + inline Vector3 Vector3::CWiseMax(const Vector3& rhs) const + { + return Vector3(std::max(GetX(), rhs.GetX()), + std::max(GetY(), rhs.GetY()), + std::max(GetZ(), rhs.GetZ())); + } + + template + inline T Vector3::MinCoeff() const + { + return *std::min_element(m_data.begin(), m_data.end()); + } + + template + inline T Vector3::MaxCoeff() const + { + return *std::max_element(m_data.begin(), m_data.end()); + } + + template + inline T Vector3::MinCoeff(uint32_t& idx) const + { + auto it = std::min_element(m_data.begin(), m_data.end()); + idx = uint32_t(std::distance(m_data.begin(), it)); + return *it; + } + + template + inline T Vector3::MaxCoeff(uint32_t& idx) const + { + auto it = std::max_element(m_data.begin(), m_data.end()); + idx = uint32_t(std::distance(m_data.begin(), it)); + return *it; + } + +/* + * Constructors + */ + template + inline Vector3::Vector3(T a) + : m_data{a, a, a} + { + } + + template + inline Vector3::Vector3(T x, T y, T z) + : m_data{x, y, z} + { + } + + template + inline Vector3::Vector3(const Vector3& rhs) + : m_data{rhs.m_data} + { + } + + template + template + inline Vector3::Vector3(const Vector3& rhs) + : m_data{T(rhs.GetX()), T(rhs.GetY()), T(rhs.GetZ())} + { + } + + template + inline Vector3::Vector3(const VHACD::Vertex& rhs) + : Vector3(rhs.mX, rhs.mY, rhs.mZ) + { + static_assert(std::is_same::value, "Vertex to Vector3 constructor only enabled for double"); + } + + template + inline Vector3::Vector3(const VHACD::Triangle& rhs) + : Vector3(rhs.mI0, rhs.mI1, rhs.mI2) + { + static_assert(std::is_same::value, "Triangle to Vector3 constructor only enabled for uint32_t"); + } + + template + inline Vector3::operator VHACD::Vertex() const + { + static_assert(std::is_same::value, "Vector3 to Vertex conversion only enable for double"); + return ::VHACD::Vertex( GetX(), GetY(), GetZ()); + } + +IVHACD* CreateVHACD(); // Create a 
synchronous (blocking) implementation of V-HACD +IVHACD* CreateVHACD_ASYNC(); // Create an asynchronous (non-blocking) implementation of V-HACD + +} // namespace VHACD + +#if ENABLE_VHACD_IMPLEMENTATION +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable:4100 4127 4189 4244 4456 4701 4702 4996) +#endif // _MSC_VER + +#ifdef __GNUC__ +#pragma GCC diagnostic push +// Minimum set of warnings used for cleanup +// #pragma GCC diagnostic warning "-Wall" +// #pragma GCC diagnostic warning "-Wextra" +// #pragma GCC diagnostic warning "-Wpedantic" +// #pragma GCC diagnostic warning "-Wold-style-cast" +// #pragma GCC diagnostic warning "-Wnon-virtual-dtor" +// #pragma GCC diagnostic warning "-Wshadow" +#endif // __GNUC__ + +// Scoped Timer +namespace VHACD { + +class Timer +{ +public: + Timer() + : m_startTime(std::chrono::high_resolution_clock::now()) + { + } + + void Reset() + { + m_startTime = std::chrono::high_resolution_clock::now(); + } + + double GetElapsedSeconds() + { + auto s = PeekElapsedSeconds(); + Reset(); + return s; + } + + double PeekElapsedSeconds() + { + auto now = std::chrono::high_resolution_clock::now(); + std::chrono::duration diff = now - m_startTime; + return diff.count(); + } + +private: + std::chrono::time_point m_startTime; +}; + +class ScopedTime +{ +public: + ScopedTime(const char* action, + VHACD::IVHACD::IUserLogger* logger) + : m_action(action) + , m_logger(logger) + { + m_timer.Reset(); + } + + ~ScopedTime() + { + double dtime = m_timer.GetElapsedSeconds(); + if( m_logger ) + { + char scratch[512]; + snprintf(scratch, + sizeof(scratch),"%s took %0.5f seconds", + m_action, + dtime); + m_logger->Log(scratch); + } + } + + const char* m_action{ nullptr }; + Timer m_timer; + VHACD::IVHACD::IUserLogger* m_logger{ nullptr }; +}; +BoundsAABB::BoundsAABB(const std::vector& points) + : m_min(points[0]) + , m_max(points[0]) +{ + for (uint32_t i = 1; i < points.size(); ++i) + { + const VHACD::Vertex& p = points[i]; + m_min = m_min.CWiseMin(p); + m_max = m_max.CWiseMax(p); + } +} + +BoundsAABB::BoundsAABB(const VHACD::Vect3& min, + const VHACD::Vect3& max) + : m_min(min) + , m_max(max) +{ +} + +BoundsAABB BoundsAABB::Union(const BoundsAABB& b) +{ + return BoundsAABB(GetMin().CWiseMin(b.GetMin()), + GetMax().CWiseMax(b.GetMax())); +} + +bool VHACD::BoundsAABB::Intersects(const VHACD::BoundsAABB& b) const +{ + if ( ( GetMin().GetX() > b.GetMax().GetX()) + || (b.GetMin().GetX() > GetMax().GetX())) + return false; + if ( ( GetMin().GetY() > b.GetMax().GetY()) + || (b.GetMin().GetY() > GetMax().GetY())) + return false; + if ( ( GetMin().GetZ() > b.GetMax().GetZ()) + || (b.GetMin().GetZ() > GetMax().GetZ())) + return false; + return true; +} + +double BoundsAABB::SurfaceArea() const +{ + VHACD::Vect3 d = GetMax() - GetMin(); + return double(2.0) * (d.GetX() * d.GetY() + d.GetX() * d.GetZ() + d.GetY() * d.GetZ()); +} + +double VHACD::BoundsAABB::Volume() const +{ + VHACD::Vect3 d = GetMax() - GetMin(); + return d.GetX() * d.GetY() * d.GetZ(); +} + +BoundsAABB VHACD::BoundsAABB::Inflate(double ratio) const +{ + double inflate = (GetMin() - GetMax()).GetNorm() * double(0.5) * ratio; + return BoundsAABB(GetMin() - inflate, + GetMax() + inflate); +} + +VHACD::Vect3 VHACD::BoundsAABB::ClosestPoint(const VHACD::Vect3& p) const +{ + return 
p.CWiseMax(GetMin()).CWiseMin(GetMax()); +} + +VHACD::Vect3& VHACD::BoundsAABB::GetMin() +{ + return m_min; +} + +VHACD::Vect3& VHACD::BoundsAABB::GetMax() +{ + return m_max; +} + +inline const VHACD::Vect3& VHACD::BoundsAABB::GetMin() const +{ + return m_min; +} + +const VHACD::Vect3& VHACD::BoundsAABB::GetMax() const +{ + return m_max; +} + +VHACD::Vect3 VHACD::BoundsAABB::GetSize() const +{ + return GetMax() - GetMin(); +} + +VHACD::Vect3 VHACD::BoundsAABB::GetCenter() const +{ + return (GetMin() + GetMax()) * double(0.5); +} + +/* + * Relies on three way comparison, which std::sort doesn't use + */ +template +void Sort(T* const array, int elements) +{ + const int batchSize = 8; + int stack[1024][2]; + + stack[0][0] = 0; + stack[0][1] = elements - 1; + int stackIndex = 1; + const dCompareKey comparator; + while (stackIndex) + { + stackIndex--; + int lo = stack[stackIndex][0]; + int hi = stack[stackIndex][1]; + if ((hi - lo) > batchSize) + { + int mid = (lo + hi) >> 1; + if (comparator.Compare(array[lo], array[mid]) > 0) + { + std::swap(array[lo], + array[mid]); + } + if (comparator.Compare(array[mid], array[hi]) > 0) + { + std::swap(array[mid], + array[hi]); + } + if (comparator.Compare(array[lo], array[mid]) > 0) + { + std::swap(array[lo], + array[mid]); + } + int i = lo + 1; + int j = hi - 1; + const T pivot(array[mid]); + do + { + while (comparator.Compare(array[i], pivot) < 0) + { + i++; + } + while (comparator.Compare(array[j], pivot) > 0) + { + j--; + } + + if (i <= j) + { + std::swap(array[i], + array[j]); + i++; + j--; + } + } while (i <= j); + + if (i < hi) + { + stack[stackIndex][0] = i; + stack[stackIndex][1] = hi; + stackIndex++; + } + if (lo < j) + { + stack[stackIndex][0] = lo; + stack[stackIndex][1] = j; + stackIndex++; + } + assert(stackIndex < int(sizeof(stack) / (2 * sizeof(stack[0][0])))); + } + } + + int stride = batchSize + 1; + if (elements < stride) + { + stride = elements; + } + for (int i = 1; i < stride; ++i) + { + if (comparator.Compare(array[0], array[i]) > 0) + { + std::swap(array[0], + array[i]); + } + } + + for (int i = 1; i < elements; ++i) + { + int j = i; + const T tmp(array[i]); + for (; comparator.Compare(array[j - 1], tmp) > 0; --j) + { + assert(j > 0); + array[j] = array[j - 1]; + } + array[j] = tmp; + } +} + +/* +Maintaining comment due to attribution +Purpose: + +TRIANGLE_AREA_3D computes the area of a triangle in 3D. + +Modified: + +22 April 1999 + +Author: + +John Burkardt + +Parameters: + +Input, double X1, Y1, Z1, X2, Y2, Z2, X3, Y3, Z3, the (getX,getY,getZ) +coordinates of the corners of the triangle. + +Output, double TRIANGLE_AREA_3D, the area of the triangle. +*/ +double ComputeArea(const VHACD::Vect3& p1, + const VHACD::Vect3& p2, + const VHACD::Vect3& p3) +{ + /* + Find the projection of (P3-P1) onto (P2-P1). + */ + double base = (p2 - p1).GetNorm(); + /* + The height of the triangle is the length of (P3-P1) after its + projection onto (P2-P1) has been subtracted. 
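+
+    In symbols, matching the code below: with base = |P2 - P1| and
+    alpha = (P3 - P1) . (P2 - P1) / base^2, the height is
+    |(P3 - P1) - alpha * (P2 - P1)| and the returned area is
+    0.5 * base * height (with height taken as 0 when base is 0).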
+ */ + double height; + if (base == double(0.0)) + { + height = double(0.0); + } + else + { + double dot = (p3 - p1).Dot(p2 - p1); + double alpha = dot / (base * base); + + VHACD::Vect3 a = p3 - p1 - alpha * (p2 - p1); + height = a.GetNorm(); + } + + return double(0.5) * base * height; +} + +bool ComputeCentroid(const std::vector& points, + const std::vector& indices, + VHACD::Vect3& center) + +{ + bool ret = false; + if (points.size()) + { + center = VHACD::Vect3(0); + + VHACD::Vect3 numerator(0); + double denominator = 0; + + for (uint32_t i = 0; i < indices.size(); i++) + { + uint32_t i1 = indices[i].mI0; + uint32_t i2 = indices[i].mI1; + uint32_t i3 = indices[i].mI2; + + const VHACD::Vect3& p1 = points[i1]; + const VHACD::Vect3& p2 = points[i2]; + const VHACD::Vect3& p3 = points[i3]; + + // Compute the average of the sum of the three positions + VHACD::Vect3 sum = (p1 + p2 + p3) / 3; + + // Compute the area of this triangle + double area = ComputeArea(p1, + p2, + p3); + + numerator += (sum * area); + + denominator += area; + } + double recip = 1 / denominator; + center = numerator * recip; + ret = true; + } + return ret; +} + +double Determinant3x3(const std::array& matrix, + double& error) +{ + double det = double(0.0); + error = double(0.0); + + double a01xa12 = matrix[0].GetY() * matrix[1].GetZ(); + double a02xa11 = matrix[0].GetZ() * matrix[1].GetY(); + error += (std::abs(a01xa12) + std::abs(a02xa11)) * std::abs(matrix[2].GetX()); + det += (a01xa12 - a02xa11) * matrix[2].GetX(); + + double a00xa12 = matrix[0].GetX() * matrix[1].GetZ(); + double a02xa10 = matrix[0].GetZ() * matrix[1].GetX(); + error += (std::abs(a00xa12) + std::abs(a02xa10)) * std::abs(matrix[2].GetY()); + det -= (a00xa12 - a02xa10) * matrix[2].GetY(); + + double a00xa11 = matrix[0].GetX() * matrix[1].GetY(); + double a01xa10 = matrix[0].GetY() * matrix[1].GetX(); + error += (std::abs(a00xa11) + std::abs(a01xa10)) * std::abs(matrix[2].GetZ()); + det += (a00xa11 - a01xa10) * matrix[2].GetZ(); + + return det; +} + +double ComputeMeshVolume(const std::vector& vertices, + const std::vector& indices) +{ + double volume = 0; + for (uint32_t i = 0; i < indices.size(); i++) + { + const std::array m = { + vertices[indices[i].mI0], + vertices[indices[i].mI1], + vertices[indices[i].mI2] + }; + double placeholder; + volume += Determinant3x3(m, + placeholder); + } + + volume *= (double(1.0) / double(6.0)); + if (volume < 0) + volume *= -1; + return volume; +} + +/* + * To minimize memory allocations while maintaining pointer stability. 
+ * Used in KdTreeNode and ConvexHull, as both use tree data structures that rely on pointer stability + * Neither rely on random access or iteration + * They just dump elements into a memory pool, then refer to pointers to the elements + * All elements are default constructed in NodeStorage's m_nodes array + */ +template +class NodeBundle +{ + struct NodeStorage { + bool IsFull() const; + + T& GetNextNode(); + + std::size_t m_index; + std::array m_nodes; + }; + + std::list m_list; + typename std::list::iterator m_head{ m_list.end() }; + +public: + T& GetNextNode(); + + T& GetFirstNode(); + + void Clear(); +}; + +template +bool NodeBundle::NodeStorage::IsFull() const +{ + return m_index == MaxBundleSize; +} + +template +T& NodeBundle::NodeStorage::GetNextNode() +{ + assert(m_index < MaxBundleSize); + T& ret = m_nodes[m_index]; + m_index++; + return ret; +} + +template +T& NodeBundle::GetNextNode() +{ + /* + * || short circuits, so doesn't dereference if m_bundle == m_bundleHead.end() + */ + if ( m_head == m_list.end() + || m_head->IsFull()) + { + m_head = m_list.emplace(m_list.end()); + } + + return m_head->GetNextNode(); +} + +template +T& NodeBundle::GetFirstNode() +{ + assert(m_head != m_list.end()); + return m_list.front().m_nodes[0]; +} + +template +void NodeBundle::Clear() +{ + m_list.clear(); +} + +/* + * Returns index of highest set bit in x + */ +inline int dExp2(int x) +{ + int exp; + for (exp = -1; x; x >>= 1) + { + exp++; + } + return exp; +} + +/* + * Reverses the order of the bits in v and returns the result + * Does not put fill any of the bits higher than the highest bit in v + * Only used to calculate index of ndNormalMap::m_normal when tessellating a triangle + */ +inline int dBitReversal(int v, + int base) +{ + int x = 0; + int power = dExp2(base) - 1; + do + { + x += (v & 1) << power; + v >>= 1; + power--; + } while (v); + return x; +} + +class Googol +{ + #define VHACD_GOOGOL_SIZE 4 +public: + Googol() = default; + Googol(double value); + + operator double() const; + Googol operator+(const Googol &A) const; + Googol operator-(const Googol &A) const; + Googol operator*(const Googol &A) const; + Googol operator/ (const Googol &A) const; + + Googol& operator+= (const Googol &A); + Googol& operator-= (const Googol &A); + + bool operator>(const Googol &A) const; + bool operator>=(const Googol &A) const; + bool operator<(const Googol &A) const; + bool operator<=(const Googol &A) const; + bool operator==(const Googol &A) const; + bool operator!=(const Googol &A) const; + + Googol Abs() const; + Googol Floor() const; + Googol InvSqrt() const; + Googol Sqrt() const; + + void ToString(char* const string) const; + +private: + void NegateMantissa(std::array& mantissa) const; + void CopySignedMantissa(std::array& mantissa) const; + int NormalizeMantissa(std::array& mantissa) const; + void ShiftRightMantissa(std::array& mantissa, + int bits) const; + uint64_t CheckCarrier(uint64_t a, uint64_t b) const; + + int LeadingZeros(uint64_t a) const; + void ExtendedMultiply(uint64_t a, + uint64_t b, + uint64_t& high, + uint64_t& low) const; + void ScaleMantissa(uint64_t* out, + uint64_t scale) const; + + int m_sign{ 0 }; + int m_exponent{ 0 }; + std::array m_mantissa{ 0 }; + +public: + static Googol m_zero; + static Googol m_one; + static Googol m_two; + static Googol m_three; + static Googol m_half; +}; + +Googol Googol::m_zero(double(0.0)); +Googol Googol::m_one(double(1.0)); +Googol Googol::m_two(double(2.0)); +Googol Googol::m_three(double(3.0)); +Googol Googol::m_half(double(0.5)); + 
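+/*
+ * Googol is a small software "big float": a sign, a base-2 exponent and a
+ * VHACD_GOOGOL_SIZE x 64-bit mantissa. An illustrative sketch (not part of
+ * the library API) of how it composes with double:
+ *
+ *     Googol a(0.1);
+ *     Googol b(0.2);
+ *     double s = double(a + b);  // arithmetic carried out in extended precision
+ *
+ * In this file it backs the Googol overload of Determinant3x3(), used when a
+ * double-precision determinant falls within its estimated rounding error.
+ */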
+Googol::Googol(double value) +{ + int exp; + double mantissa = fabs(frexp(value, &exp)); + + m_exponent = exp; + m_sign = (value >= 0) ? 0 : 1; + + m_mantissa[0] = uint64_t(double(uint64_t(1) << 62) * mantissa); +} + +Googol::operator double() const +{ + double mantissa = (double(1.0) / double(uint64_t(1) << 62)) * double(m_mantissa[0]); + mantissa = ldexp(mantissa, m_exponent) * (m_sign ? double(-1.0) : double(1.0)); + return mantissa; +} + +Googol Googol::operator+(const Googol &A) const +{ + Googol tmp; + if (m_mantissa[0] && A.m_mantissa[0]) + { + std::array mantissa0; + std::array mantissa1; + std::array mantissa; + + CopySignedMantissa(mantissa0); + A.CopySignedMantissa(mantissa1); + + int exponentDiff = m_exponent - A.m_exponent; + int exponent = m_exponent; + if (exponentDiff > 0) + { + ShiftRightMantissa(mantissa1, + exponentDiff); + } + else if (exponentDiff < 0) + { + exponent = A.m_exponent; + ShiftRightMantissa(mantissa0, + -exponentDiff); + } + + uint64_t carrier = 0; + for (int i = VHACD_GOOGOL_SIZE - 1; i >= 0; i--) + { + uint64_t m0 = mantissa0[i]; + uint64_t m1 = mantissa1[i]; + mantissa[i] = m0 + m1 + carrier; + carrier = CheckCarrier(m0, m1) | CheckCarrier(m0 + m1, carrier); + } + + int sign = 0; + if (int64_t(mantissa[0]) < 0) + { + sign = 1; + NegateMantissa(mantissa); + } + + int bits = NormalizeMantissa(mantissa); + if (bits <= (-64 * VHACD_GOOGOL_SIZE)) + { + tmp.m_sign = 0; + tmp.m_exponent = 0; + } + else + { + tmp.m_sign = sign; + tmp.m_exponent = int(exponent + bits); + } + + tmp.m_mantissa = mantissa; + } + else if (A.m_mantissa[0]) + { + tmp = A; + } + else + { + tmp = *this; + } + + return tmp; +} + +Googol Googol::operator-(const Googol &A) const +{ + Googol tmp(A); + tmp.m_sign = !tmp.m_sign; + return *this + tmp; +} + +Googol Googol::operator*(const Googol &A) const +{ + if (m_mantissa[0] && A.m_mantissa[0]) + { + std::array mantissaAcc{ 0 }; + for (int i = VHACD_GOOGOL_SIZE - 1; i >= 0; i--) + { + uint64_t a = m_mantissa[i]; + if (a) + { + uint64_t mantissaScale[2 * VHACD_GOOGOL_SIZE] = { 0 }; + A.ScaleMantissa(&mantissaScale[i], a); + + uint64_t carrier = 0; + for (int j = 0; j < 2 * VHACD_GOOGOL_SIZE; j++) + { + const int k = 2 * VHACD_GOOGOL_SIZE - 1 - j; + uint64_t m0 = mantissaAcc[k]; + uint64_t m1 = mantissaScale[k]; + mantissaAcc[k] = m0 + m1 + carrier; + carrier = CheckCarrier(m0, m1) | CheckCarrier(m0 + m1, carrier); + } + } + } + + uint64_t carrier = 0; + int bits = LeadingZeros(mantissaAcc[0]) - 2; + for (int i = 0; i < 2 * VHACD_GOOGOL_SIZE; i++) + { + const int k = 2 * VHACD_GOOGOL_SIZE - 1 - i; + uint64_t a = mantissaAcc[k]; + mantissaAcc[k] = (a << uint64_t(bits)) | carrier; + carrier = a >> uint64_t(64 - bits); + } + + int exp = m_exponent + A.m_exponent - (bits - 2); + + Googol tmp; + tmp.m_sign = m_sign ^ A.m_sign; + tmp.m_exponent = exp; + for (std::size_t i = 0; i < tmp.m_mantissa.size(); ++i) + { + tmp.m_mantissa[i] = mantissaAcc[i]; + } + + return tmp; + } + return Googol(double(0.0)); +} + +Googol Googol::operator/(const Googol &A) const +{ + Googol tmp(double(1.0) / A); + tmp = tmp * (m_two - A * tmp); + tmp = tmp * (m_two - A * tmp); + bool test = false; + int passes = 0; + do + { + passes++; + Googol tmp0(tmp); + tmp = tmp * (m_two - A * tmp); + test = tmp0 == tmp; + } while (test && (passes < (2 * VHACD_GOOGOL_SIZE))); + return (*this) * tmp; +} + +Googol& Googol::operator+=(const Googol &A) +{ + *this = *this + A; + return *this; +} + +Googol& Googol::operator-=(const Googol &A) +{ + *this = *this - A; + return *this; +} + 
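+/*
+ * Note on Googol::operator/ above: it forms (*this) * (1 / A), seeding the
+ * reciprocal from double precision and refining it with the Newton iteration
+ *
+ *     x_{k+1} = x_k * (2 - A * x_k)
+ *
+ * Each pass roughly doubles the number of correct mantissa bits, so the
+ * double-precision seed reaches the full mantissa width within the small,
+ * bounded number of passes the loop allows.
+ */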
+bool Googol::operator>(const Googol &A) const +{ + Googol tmp(*this - A); + return double(tmp) > double(0.0); +} + +bool Googol::operator>=(const Googol &A) const +{ + Googol tmp(*this - A); + return double(tmp) >= double(0.0); +} + +bool Googol::operator<(const Googol &A) const +{ + Googol tmp(*this - A); + return double(tmp) < double(0.0); +} + +bool Googol::operator<=(const Googol &A) const +{ + Googol tmp(*this - A); + return double(tmp) <= double(0.0); +} + +bool Googol::operator==(const Googol &A) const +{ + return m_sign == A.m_sign + && m_exponent == A.m_exponent + && m_mantissa == A.m_mantissa; +} + +bool Googol::operator!=(const Googol &A) const +{ + return !(*this == A); +} + +Googol Googol::Abs() const +{ + Googol tmp(*this); + tmp.m_sign = 0; + return tmp; +} + +Googol Googol::Floor() const +{ + if (m_exponent < 1) + { + return Googol(double(0.0)); + } + int bits = m_exponent + 2; + int start = 0; + while (bits >= 64) + { + bits -= 64; + start++; + } + + Googol tmp(*this); + for (int i = VHACD_GOOGOL_SIZE - 1; i > start; i--) + { + tmp.m_mantissa[i] = 0; + } + // some compilers do no like this and I do not know why is that + //uint64_t mask = (-1LL) << (64 - bits); + uint64_t mask(~0ULL); + mask <<= (64 - bits); + tmp.m_mantissa[start] &= mask; + return tmp; +} + +Googol Googol::InvSqrt() const +{ + const Googol& me = *this; + Googol x(double(1.0) / sqrt(me)); + + int test = 0; + int passes = 0; + do + { + passes++; + Googol tmp(x); + x = m_half * x * (m_three - me * x * x); + test = (x != tmp); + } while (test && (passes < (2 * VHACD_GOOGOL_SIZE))); + return x; +} + +Googol Googol::Sqrt() const +{ + return *this * InvSqrt(); +} + +void Googol::ToString(char* const string) const +{ + Googol tmp(*this); + Googol base(double(10.0)); + while (double(tmp) > double(1.0)) + { + tmp = tmp / base; + } + + int index = 0; + while (tmp.m_mantissa[0]) + { + tmp = tmp * base; + Googol digit(tmp.Floor()); + tmp -= digit; + double val = digit; + string[index] = char(val) + '0'; + index++; + } + string[index] = 0; +} + +void Googol::NegateMantissa(std::array& mantissa) const +{ + uint64_t carrier = 1; + for (size_t i = mantissa.size() - 1; i < mantissa.size(); i--) + { + uint64_t a = ~mantissa[i] + carrier; + if (a) + { + carrier = 0; + } + mantissa[i] = a; + } +} + +void Googol::CopySignedMantissa(std::array& mantissa) const +{ + mantissa = m_mantissa; + if (m_sign) + { + NegateMantissa(mantissa); + } +} + +int Googol::NormalizeMantissa(std::array& mantissa) const +{ + int bits = 0; + if (int64_t(mantissa[0] * 2) < 0) + { + bits = 1; + ShiftRightMantissa(mantissa, 1); + } + else + { + while (!mantissa[0] && bits > (-64 * VHACD_GOOGOL_SIZE)) + { + bits -= 64; + for (int i = 1; i < VHACD_GOOGOL_SIZE; i++) { + mantissa[i - 1] = mantissa[i]; + } + mantissa[VHACD_GOOGOL_SIZE - 1] = 0; + } + + if (bits > (-64 * VHACD_GOOGOL_SIZE)) + { + int n = LeadingZeros(mantissa[0]) - 2; + if (n > 0) + { + uint64_t carrier = 0; + for (int i = VHACD_GOOGOL_SIZE - 1; i >= 0; i--) + { + uint64_t a = mantissa[i]; + mantissa[i] = (a << n) | carrier; + carrier = a >> (64 - n); + } + bits -= n; + } + else if (n < 0) + { + // this is very rare but it does happens, whee the leading zeros of the mantissa is an exact multiple of 64 + uint64_t carrier = 0; + int shift = -n; + for (int i = 0; i < VHACD_GOOGOL_SIZE; i++) + { + uint64_t a = mantissa[i]; + mantissa[i] = (a >> shift) | carrier; + carrier = a << (64 - shift); + } + bits -= n; + } + } + } + return bits; +} + +void Googol::ShiftRightMantissa(std::array& 
mantissa, + int bits) const +{ + uint64_t carrier = 0; + if (int64_t(mantissa[0]) < int64_t(0)) + { + carrier = uint64_t(-1); + } + + while (bits >= 64) + { + for (int i = VHACD_GOOGOL_SIZE - 2; i >= 0; i--) + { + mantissa[i + 1] = mantissa[i]; + } + mantissa[0] = carrier; + bits -= 64; + } + + if (bits > 0) + { + carrier <<= (64 - bits); + for (int i = 0; i < VHACD_GOOGOL_SIZE; i++) + { + uint64_t a = mantissa[i]; + mantissa[i] = (a >> bits) | carrier; + carrier = a << (64 - bits); + } + } +} + +uint64_t Googol::CheckCarrier(uint64_t a, uint64_t b) const +{ + return ((uint64_t(-1) - b) < a) ? uint64_t(1) : 0; +} + +int Googol::LeadingZeros(uint64_t a) const +{ + #define VHACD_COUNTBIT(mask, add) \ + do { \ + uint64_t test = a & mask; \ + n += test ? 0 : add; \ + a = test ? test : (a & ~mask); \ + } while (false) + + int n = 0; + VHACD_COUNTBIT(0xffffffff00000000LL, 32); + VHACD_COUNTBIT(0xffff0000ffff0000LL, 16); + VHACD_COUNTBIT(0xff00ff00ff00ff00LL, 8); + VHACD_COUNTBIT(0xf0f0f0f0f0f0f0f0LL, 4); + VHACD_COUNTBIT(0xccccccccccccccccLL, 2); + VHACD_COUNTBIT(0xaaaaaaaaaaaaaaaaLL, 1); + + return n; +} + +void Googol::ExtendedMultiply(uint64_t a, + uint64_t b, + uint64_t& high, + uint64_t& low) const +{ + uint64_t bLow = b & 0xffffffff; + uint64_t bHigh = b >> 32; + uint64_t aLow = a & 0xffffffff; + uint64_t aHigh = a >> 32; + + uint64_t l = bLow * aLow; + + uint64_t c1 = bHigh * aLow; + uint64_t c2 = bLow * aHigh; + uint64_t m = c1 + c2; + uint64_t carrier = CheckCarrier(c1, c2) << 32; + + uint64_t h = bHigh * aHigh + carrier; + + uint64_t ml = m << 32; + uint64_t ll = l + ml; + uint64_t mh = (m >> 32) + CheckCarrier(l, ml); + uint64_t hh = h + mh; + + low = ll; + high = hh; +} + +void Googol::ScaleMantissa(uint64_t* dst, + uint64_t scale) const +{ + uint64_t carrier = 0; + for (int i = VHACD_GOOGOL_SIZE - 1; i >= 0; i--) + { + if (m_mantissa[i]) + { + uint64_t low; + uint64_t high; + ExtendedMultiply(scale, + m_mantissa[i], + high, + low); + uint64_t acc = low + carrier; + carrier = CheckCarrier(low, + carrier); + carrier += high; + dst[i + 1] = acc; + } + else + { + dst[i + 1] = carrier; + carrier = 0; + } + + } + dst[0] = carrier; +} + +Googol Determinant3x3(const std::array, 3>& matrix) +{ + Googol det = double(0.0); + + Googol a01xa12 = matrix[0].GetY() * matrix[1].GetZ(); + Googol a02xa11 = matrix[0].GetZ() * matrix[1].GetY(); + det += (a01xa12 - a02xa11) * matrix[2].GetX(); + + Googol a00xa12 = matrix[0].GetX() * matrix[1].GetZ(); + Googol a02xa10 = matrix[0].GetZ() * matrix[1].GetX(); + det -= (a00xa12 - a02xa10) * matrix[2].GetY(); + + Googol a00xa11 = matrix[0].GetX() * matrix[1].GetY(); + Googol a01xa10 = matrix[0].GetY() * matrix[1].GetX(); + det += (a00xa11 - a01xa10) * matrix[2].GetZ(); + return det; +} + +class HullPlane : public VHACD::Vect3 +{ +public: + HullPlane(const HullPlane&) = default; + HullPlane(double x, + double y, + double z, + double w); + + HullPlane(const VHACD::Vect3& p, + double w); + + HullPlane(const VHACD::Vect3& p0, + const VHACD::Vect3& p1, + const VHACD::Vect3& p2); + + HullPlane Scale(double s) const; + + HullPlane& operator=(const HullPlane& rhs); + + double Evalue(const VHACD::Vect3 &point) const; + + double& GetW(); + const double& GetW() const; + +private: + double m_w; +}; + +HullPlane::HullPlane(double x, + double y, + double z, + double w) + : VHACD::Vect3(x, y, z) + , m_w(w) +{ +} + +HullPlane::HullPlane(const VHACD::Vect3& p, + double w) + : VHACD::Vect3(p) + , m_w(w) +{ +} + +HullPlane::HullPlane(const VHACD::Vect3& p0, + const VHACD::Vect3& 
p1, + const VHACD::Vect3& p2) + : VHACD::Vect3((p1 - p0).Cross(p2 - p0)) + , m_w(-Dot(p0)) +{ +} + +HullPlane HullPlane::Scale(double s) const +{ + return HullPlane(*this * s, + m_w * s); +} + +HullPlane& HullPlane::operator=(const HullPlane& rhs) +{ + GetX() = rhs.GetX(); + GetY() = rhs.GetY(); + GetZ() = rhs.GetZ(); + m_w = rhs.m_w; + return *this; +} + +double HullPlane::Evalue(const VHACD::Vect3& point) const +{ + return Dot(point) + m_w; +} + +double& HullPlane::GetW() +{ + return m_w; +} + +const double& HullPlane::GetW() const +{ + return m_w; +} + +class ConvexHullFace +{ +public: + ConvexHullFace() = default; + double Evalue(const std::vector& pointArray, + const VHACD::Vect3& point) const; + HullPlane GetPlaneEquation(const std::vector& pointArray, + bool& isValid) const; + + std::array m_index; +private: + int m_mark{ 0 }; + std::array::iterator, 3> m_twin; + + friend class ConvexHull; +}; + +double ConvexHullFace::Evalue(const std::vector& pointArray, + const VHACD::Vect3& point) const +{ + const VHACD::Vect3& p0 = pointArray[m_index[0]]; + const VHACD::Vect3& p1 = pointArray[m_index[1]]; + const VHACD::Vect3& p2 = pointArray[m_index[2]]; + + std::array matrix = { p2 - p0, p1 - p0, point - p0 }; + double error; + double det = Determinant3x3(matrix, + error); + + // the code use double, however the threshold for accuracy test is the machine precision of a float. + // by changing this to a smaller number, the code should run faster since many small test will be considered valid + // the precision must be a power of two no smaller than the machine precision of a double, (1<<48) + // float64(1<<30) can be a good value + + // double precision = double (1.0f) / double (1<<30); + double precision = double(1.0) / double(1 << 24); + double errbound = error * precision; + if (fabs(det) > errbound) + { + return det; + } + + const VHACD::Vector3 p0g = pointArray[m_index[0]]; + const VHACD::Vector3 p1g = pointArray[m_index[1]]; + const VHACD::Vector3 p2g = pointArray[m_index[2]]; + const VHACD::Vector3 pointg = point; + std::array, 3> exactMatrix = { p2g - p0g, p1g - p0g, pointg - p0g }; + return Determinant3x3(exactMatrix); +} + +HullPlane ConvexHullFace::GetPlaneEquation(const std::vector& pointArray, + bool& isvalid) const +{ + const VHACD::Vect3& p0 = pointArray[m_index[0]]; + const VHACD::Vect3& p1 = pointArray[m_index[1]]; + const VHACD::Vect3& p2 = pointArray[m_index[2]]; + HullPlane plane(p0, p1, p2); + + isvalid = false; + double mag2 = plane.Dot(plane); + if (mag2 > double(1.0e-16)) + { + isvalid = true; + plane = plane.Scale(double(1.0) / sqrt(mag2)); + } + return plane; +} + +class ConvexHullVertex : public VHACD::Vect3 +{ +public: + ConvexHullVertex() = default; + ConvexHullVertex(const ConvexHullVertex&) = default; + ConvexHullVertex& operator=(const ConvexHullVertex& rhs) = default; + using VHACD::Vect3::operator=; + + int m_mark{ 0 }; +}; + + +class ConvexHullAABBTreeNode +{ + #define VHACD_CONVEXHULL_3D_VERTEX_CLUSTER_SIZE 8 +public: + ConvexHullAABBTreeNode() = default; + ConvexHullAABBTreeNode(ConvexHullAABBTreeNode* parent); + + VHACD::Vect3 m_box[2]; + ConvexHullAABBTreeNode* m_left{ nullptr }; + ConvexHullAABBTreeNode* m_right{ nullptr }; + ConvexHullAABBTreeNode* m_parent{ nullptr }; + + size_t m_count; + std::array m_indices; +}; + +ConvexHullAABBTreeNode::ConvexHullAABBTreeNode(ConvexHullAABBTreeNode* parent) + : m_parent(parent) +{ +} + +class ConvexHull +{ + class ndNormalMap; + +public: + ConvexHull(const ConvexHull& source); + ConvexHull(const 
std::vector<::VHACD::Vertex>& vertexCloud, + double distTol, + int maxVertexCount = 0x7fffffff); + ~ConvexHull() = default; + + const std::vector& GetVertexPool() const; + + const std::list& GetList() const { return m_list; } + +private: + void BuildHull(const std::vector<::VHACD::Vertex>& vertexCloud, + double distTol, + int maxVertexCount); + + void GetUniquePoints(std::vector& points); + int InitVertexArray(std::vector& points, + NodeBundle& memoryPool); + + ConvexHullAABBTreeNode* BuildTreeNew(std::vector& points, + std::vector& memoryPool) const; + ConvexHullAABBTreeNode* BuildTreeOld(std::vector& points, + NodeBundle& memoryPool); + ConvexHullAABBTreeNode* BuildTreeRecurse(ConvexHullAABBTreeNode* const parent, + ConvexHullVertex* const points, + int count, + int baseIndex, + NodeBundle& memoryPool) const; + + std::list::iterator AddFace(int i0, + int i1, + int i2); + + void CalculateConvexHull3D(ConvexHullAABBTreeNode* vertexTree, + std::vector& points, + int count, + double distTol, + int maxVertexCount); + + int SupportVertex(ConvexHullAABBTreeNode** const tree, + const std::vector& points, + const VHACD::Vect3& dir, + const bool removeEntry = true) const; + double TetrahedrumVolume(const VHACD::Vect3& p0, + const VHACD::Vect3& p1, + const VHACD::Vect3& p2, + const VHACD::Vect3& p3) const; + + std::list m_list; + VHACD::Vect3 m_aabbP0{ 0 }; + VHACD::Vect3 m_aabbP1{ 0 }; + double m_diag{ 0.0 }; + std::vector m_points; +}; + +class ConvexHull::ndNormalMap +{ +public: + ndNormalMap(); + + static const ndNormalMap& GetNormalMap(); + + void TessellateTriangle(int level, + const VHACD::Vect3& p0, + const VHACD::Vect3& p1, + const VHACD::Vect3& p2, + int& count); + + std::array m_normal; + int m_count{ 128 }; +}; + +const ConvexHull::ndNormalMap& ConvexHull::ndNormalMap::GetNormalMap() +{ + static ndNormalMap normalMap; + return normalMap; +} + +void ConvexHull::ndNormalMap::TessellateTriangle(int level, + const VHACD::Vect3& p0, + const VHACD::Vect3& p1, + const VHACD::Vect3& p2, + int& count) +{ + if (level) + { + assert(fabs(p0.Dot(p0) - double(1.0)) < double(1.0e-4)); + assert(fabs(p1.Dot(p1) - double(1.0)) < double(1.0e-4)); + assert(fabs(p2.Dot(p2) - double(1.0)) < double(1.0e-4)); + VHACD::Vect3 p01(p0 + p1); + VHACD::Vect3 p12(p1 + p2); + VHACD::Vect3 p20(p2 + p0); + + p01 = p01 * (double(1.0) / p01.GetNorm()); + p12 = p12 * (double(1.0) / p12.GetNorm()); + p20 = p20 * (double(1.0) / p20.GetNorm()); + + assert(fabs(p01.GetNormSquared() - double(1.0)) < double(1.0e-4)); + assert(fabs(p12.GetNormSquared() - double(1.0)) < double(1.0e-4)); + assert(fabs(p20.GetNormSquared() - double(1.0)) < double(1.0e-4)); + + TessellateTriangle(level - 1, p0, p01, p20, count); + TessellateTriangle(level - 1, p1, p12, p01, count); + TessellateTriangle(level - 1, p2, p20, p12, count); + TessellateTriangle(level - 1, p01, p12, p20, count); + } + else + { + /* + * This is just m_normal[index] = n.Normalized(), but due to tiny floating point errors, causes + * different outputs, so I'm leaving it + */ + HullPlane n(p0, p1, p2); + n = n.Scale(double(1.0) / n.GetNorm()); + n.GetW() = double(0.0); + int index = dBitReversal(count, + int(m_normal.size())); + m_normal[index] = n; + count++; + assert(count <= int(m_normal.size())); + } +} + +ConvexHull::ndNormalMap::ndNormalMap() +{ + VHACD::Vect3 p0(double( 1.0), double( 0.0), double( 0.0)); + VHACD::Vect3 p1(double(-1.0), double( 0.0), double( 0.0)); + VHACD::Vect3 p2(double( 0.0), double( 1.0), double( 0.0)); + VHACD::Vect3 p3(double( 0.0), double(-1.0), 
double( 0.0)); + VHACD::Vect3 p4(double( 0.0), double( 0.0), double( 1.0)); + VHACD::Vect3 p5(double( 0.0), double( 0.0), double(-1.0)); + + int count = 0; + int subdivisions = 2; + TessellateTriangle(subdivisions, p4, p0, p2, count); + TessellateTriangle(subdivisions, p0, p5, p2, count); + TessellateTriangle(subdivisions, p5, p1, p2, count); + TessellateTriangle(subdivisions, p1, p4, p2, count); + TessellateTriangle(subdivisions, p0, p4, p3, count); + TessellateTriangle(subdivisions, p5, p0, p3, count); + TessellateTriangle(subdivisions, p1, p5, p3, count); + TessellateTriangle(subdivisions, p4, p1, p3, count); +} + +ConvexHull::ConvexHull(const std::vector<::VHACD::Vertex>& vertexCloud, + double distTol, + int maxVertexCount) +{ + if (vertexCloud.size() >= 4) + { + BuildHull(vertexCloud, + distTol, + maxVertexCount); + } +} + +const std::vector& ConvexHull::GetVertexPool() const +{ + return m_points; +} + +void ConvexHull::BuildHull(const std::vector<::VHACD::Vertex>& vertexCloud, + double distTol, + int maxVertexCount) +{ + size_t treeCount = vertexCloud.size() / (VHACD_CONVEXHULL_3D_VERTEX_CLUSTER_SIZE >> 1); + treeCount = std::max(treeCount, size_t(4)) * 2; + + std::vector points(vertexCloud.size()); + /* + * treePool provides a memory pool for the AABB tree + * Each node is either a leaf or non-leaf node + * Non-leaf nodes have up to 8 vertices + * Vertices are specified by the m_indices array and are accessed via the points array + * + * Later on in ConvexHull::SupportVertex, the tree is used directly + * It differentiates between ConvexHullAABBTreeNode and ConvexHull3DPointCluster by whether the m_left and m_right + * pointers are null or not + * + * Pointers have to be stable + */ + NodeBundle treePool; + for (size_t i = 0; i < vertexCloud.size(); ++i) + { + points[i] = VHACD::Vect3(vertexCloud[i]); + } + int count = InitVertexArray(points, + treePool); + + if (m_points.size() >= 4) + { + CalculateConvexHull3D(&treePool.GetFirstNode(), + points, + count, + distTol, + maxVertexCount); + } +} + +void ConvexHull::GetUniquePoints(std::vector& points) +{ + class CompareVertex + { + public: + int Compare(const ConvexHullVertex& elementA, const ConvexHullVertex& elementB) const + { + for (int i = 0; i < 3; i++) + { + if (elementA[i] < elementB[i]) + { + return -1; + } + else if (elementA[i] > elementB[i]) + { + return 1; + } + } + return 0; + } + }; + + int count = int(points.size()); + Sort(points.data(), + count); + + int indexCount = 0; + CompareVertex compareVertex; + for (int i = 1; i < count; ++i) + { + for (; i < count; ++i) + { + if (compareVertex.Compare(points[indexCount], points[i])) + { + indexCount++; + points[indexCount] = points[i]; + break; + } + } + } + points.resize(indexCount + 1); +} + +ConvexHullAABBTreeNode* ConvexHull::BuildTreeRecurse(ConvexHullAABBTreeNode* const parent, + ConvexHullVertex* const points, + int count, + int baseIndex, + NodeBundle& memoryPool) const +{ + ConvexHullAABBTreeNode* tree = nullptr; + + assert(count); + VHACD::Vect3 minP( double(1.0e15)); + VHACD::Vect3 maxP(-double(1.0e15)); + if (count <= VHACD_CONVEXHULL_3D_VERTEX_CLUSTER_SIZE) + { + ConvexHullAABBTreeNode& clump = memoryPool.GetNextNode(); + + clump.m_count = count; + for (int i = 0; i < count; ++i) + { + clump.m_indices[i] = i + baseIndex; + + const VHACD::Vect3& p = points[i]; + minP = minP.CWiseMin(p); + maxP = maxP.CWiseMax(p); + } + + clump.m_left = nullptr; + clump.m_right = nullptr; + tree = &clump; + } + else + { + VHACD::Vect3 median(0); + VHACD::Vect3 varian(0); + for (int 
i = 0; i < count; ++i) + { + const VHACD::Vect3& p = points[i]; + minP = minP.CWiseMin(p); + maxP = maxP.CWiseMax(p); + median += p; + varian += p.CWiseMul(p); + } + + varian = varian * double(count) - median.CWiseMul(median); + int index = 0; + double maxVarian = double(-1.0e10); + for (int i = 0; i < 3; ++i) + { + if (varian[i] > maxVarian) + { + index = i; + maxVarian = varian[i]; + } + } + VHACD::Vect3 center(median * (double(1.0) / double(count))); + + double test = center[index]; + + int i0 = 0; + int i1 = count - 1; + do + { + for (; i0 <= i1; i0++) + { + double val = points[i0][index]; + if (val > test) + { + break; + } + } + + for (; i1 >= i0; i1--) + { + double val = points[i1][index]; + if (val < test) + { + break; + } + } + + if (i0 < i1) + { + std::swap(points[i0], + points[i1]); + i0++; + i1--; + } + } while (i0 <= i1); + + if (i0 == 0) + { + i0 = count / 2; + } + if (i0 >= (count - 1)) + { + i0 = count / 2; + } + + tree = &memoryPool.GetNextNode(); + + assert(i0); + assert(count - i0); + + tree->m_left = BuildTreeRecurse(tree, + points, + i0, + baseIndex, + memoryPool); + tree->m_right = BuildTreeRecurse(tree, + &points[i0], + count - i0, + i0 + baseIndex, + memoryPool); + } + + assert(tree); + tree->m_parent = parent; + /* + * WARNING: Changing the compiler conversion of 1.0e-3f changes the results of the convex decomposition + * Inflate the tree's bounding box slightly + */ + tree->m_box[0] = minP - VHACD::Vect3(double(1.0e-3f)); + tree->m_box[1] = maxP + VHACD::Vect3(double(1.0e-3f)); + return tree; +} + +ConvexHullAABBTreeNode* ConvexHull::BuildTreeOld(std::vector& points, + NodeBundle& memoryPool) +{ + GetUniquePoints(points); + int count = int(points.size()); + if (count < 4) + { + return nullptr; + } + return BuildTreeRecurse(nullptr, + points.data(), + count, + 0, + memoryPool); +} + +ConvexHullAABBTreeNode* ConvexHull::BuildTreeNew(std::vector& points, + std::vector& memoryPool) const +{ + class dCluster + { + public: + VHACD::Vect3 m_sum{ double(0.0) }; + VHACD::Vect3 m_sum2{ double(0.0) }; + int m_start{ 0 }; + int m_count{ 0 }; + }; + + dCluster firstCluster; + firstCluster.m_count = int(points.size()); + + for (int i = 0; i < firstCluster.m_count; ++i) + { + const VHACD::Vect3& p = points[i]; + firstCluster.m_sum += p; + firstCluster.m_sum2 += p.CWiseMul(p); + } + + int baseCount = 0; + const int clusterSize = 16; + + if (firstCluster.m_count > clusterSize) + { + dCluster spliteStack[128]; + spliteStack[0] = firstCluster; + size_t stack = 1; + + while (stack) + { + stack--; + dCluster cluster (spliteStack[stack]); + + const VHACD::Vect3 origin(cluster.m_sum * (double(1.0) / cluster.m_count)); + const VHACD::Vect3 variance2(cluster.m_sum2 * (double(1.0) / cluster.m_count) - origin.CWiseMul(origin)); + double maxVariance2 = variance2.MaxCoeff(); + + if ( (cluster.m_count <= clusterSize) + || (stack > (sizeof(spliteStack) / sizeof(spliteStack[0]) - 4)) + || (maxVariance2 < 1.e-4f)) + { + // no sure if this is beneficial, + // the array is so small that seem too much overhead + //int maxIndex = 0; + //double min_x = 1.0e20f; + //for (int i = 0; i < cluster.m_count; ++i) + //{ + // if (points[cluster.m_start + i].getX() < min_x) + // { + // maxIndex = i; + // min_x = points[cluster.m_start + i].getX(); + // } + //} + //Swap(points[cluster.m_start], points[cluster.m_start + maxIndex]); + // + //for (int i = 2; i < cluster.m_count; ++i) + //{ + // int j = i; + // ConvexHullVertex tmp(points[cluster.m_start + i]); + // for (; points[cluster.m_start + j - 1].getX() > 
tmp.getX(); --j) + // { + // assert(j > 0); + // points[cluster.m_start + j] = points[cluster.m_start + j - 1]; + // } + // points[cluster.m_start + j] = tmp; + //} + + int count = cluster.m_count; + for (int i = cluster.m_count - 1; i > 0; --i) + { + for (int j = i - 1; j >= 0; --j) + { + VHACD::Vect3 error(points[cluster.m_start + j] - points[cluster.m_start + i]); + double mag2 = error.Dot(error); + if (mag2 < double(1.0e-6)) + { + points[cluster.m_start + j] = points[cluster.m_start + i]; + count--; + break; + } + } + } + + assert(baseCount <= cluster.m_start); + for (int i = 0; i < count; ++i) + { + points[baseCount] = points[cluster.m_start + i]; + baseCount++; + } + } + else + { + const int firstSortAxis = variance2.LongestAxis(); + double axisVal = origin[firstSortAxis]; + + int i0 = 0; + int i1 = cluster.m_count - 1; + + const int start = cluster.m_start; + while (i0 < i1) + { + while ( (points[start + i0][firstSortAxis] <= axisVal) + && (i0 < i1)) + { + ++i0; + }; + + while ( (points[start + i1][firstSortAxis] > axisVal) + && (i0 < i1)) + { + --i1; + } + + assert(i0 <= i1); + if (i0 < i1) + { + std::swap(points[start + i0], + points[start + i1]); + ++i0; + --i1; + } + } + + while ( (points[start + i0][firstSortAxis] <= axisVal) + && (i0 < cluster.m_count)) + { + ++i0; + }; + + #ifdef _DEBUG + for (int i = 0; i < i0; ++i) + { + assert(points[start + i][firstSortAxis] <= axisVal); + } + + for (int i = i0; i < cluster.m_count; ++i) + { + assert(points[start + i][firstSortAxis] > axisVal); + } + #endif + + VHACD::Vect3 xc(0); + VHACD::Vect3 x2c(0); + for (int i = 0; i < i0; ++i) + { + const VHACD::Vect3& x = points[start + i]; + xc += x; + x2c += x.CWiseMul(x); + } + + dCluster cluster_i1(cluster); + cluster_i1.m_start = start + i0; + cluster_i1.m_count = cluster.m_count - i0; + cluster_i1.m_sum -= xc; + cluster_i1.m_sum2 -= x2c; + spliteStack[stack] = cluster_i1; + assert(cluster_i1.m_count > 0); + stack++; + + dCluster cluster_i0(cluster); + cluster_i0.m_start = start; + cluster_i0.m_count = i0; + cluster_i0.m_sum = xc; + cluster_i0.m_sum2 = x2c; + assert(cluster_i0.m_count > 0); + spliteStack[stack] = cluster_i0; + stack++; + } + } + } + + points.resize(baseCount); + if (baseCount < 4) + { + return nullptr; + } + + VHACD::Vect3 sum(0); + VHACD::Vect3 sum2(0); + VHACD::Vect3 minP(double( 1.0e15)); + VHACD::Vect3 maxP(double(-1.0e15)); + class dTreeBox + { + public: + VHACD::Vect3 m_min; + VHACD::Vect3 m_max; + VHACD::Vect3 m_sum; + VHACD::Vect3 m_sum2; + ConvexHullAABBTreeNode* m_parent; + ConvexHullAABBTreeNode** m_child; + int m_start; + int m_count; + }; + + for (int i = 0; i < baseCount; ++i) + { + const VHACD::Vect3& p = points[i]; + sum += p; + sum2 += p.CWiseMul(p); + minP = minP.CWiseMin(p); + maxP = maxP.CWiseMax(p); + } + + dTreeBox treeBoxStack[128]; + treeBoxStack[0].m_start = 0; + treeBoxStack[0].m_count = baseCount; + treeBoxStack[0].m_sum = sum; + treeBoxStack[0].m_sum2 = sum2; + treeBoxStack[0].m_min = minP; + treeBoxStack[0].m_max = maxP; + treeBoxStack[0].m_child = nullptr; + treeBoxStack[0].m_parent = nullptr; + + int stack = 1; + ConvexHullAABBTreeNode* root = nullptr; + while (stack) + { + stack--; + dTreeBox box(treeBoxStack[stack]); + if (box.m_count <= VHACD_CONVEXHULL_3D_VERTEX_CLUSTER_SIZE) + { + assert(memoryPool.size() != memoryPool.capacity() + && "memoryPool is going to be reallocated, pointers will be invalid"); + memoryPool.emplace_back(); + ConvexHullAABBTreeNode& clump = memoryPool.back(); + + clump.m_count = box.m_count; + for (int i = 0; i < 
box.m_count; ++i) + { + clump.m_indices[i] = i + box.m_start; + } + clump.m_box[0] = box.m_min; + clump.m_box[1] = box.m_max; + + if (box.m_child) + { + *box.m_child = &clump; + } + + if (!root) + { + root = &clump; + } + } + else + { + const VHACD::Vect3 origin(box.m_sum * (double(1.0) / box.m_count)); + const VHACD::Vect3 variance2(box.m_sum2 * (double(1.0) / box.m_count) - origin.CWiseMul(origin)); + + int firstSortAxis = 0; + if ((variance2.GetY() >= variance2.GetX()) && (variance2.GetY() >= variance2.GetZ())) + { + firstSortAxis = 1; + } + else if ((variance2.GetZ() >= variance2.GetX()) && (variance2.GetZ() >= variance2.GetY())) + { + firstSortAxis = 2; + } + double axisVal = origin[firstSortAxis]; + + int i0 = 0; + int i1 = box.m_count - 1; + + const int start = box.m_start; + while (i0 < i1) + { + while ((points[start + i0][firstSortAxis] <= axisVal) && (i0 < i1)) + { + ++i0; + }; + + while ((points[start + i1][firstSortAxis] > axisVal) && (i0 < i1)) + { + --i1; + } + + assert(i0 <= i1); + if (i0 < i1) + { + std::swap(points[start + i0], + points[start + i1]); + ++i0; + --i1; + } + } + + while ((points[start + i0][firstSortAxis] <= axisVal) && (i0 < box.m_count)) + { + ++i0; + }; + + #ifdef _DEBUG + for (int i = 0; i < i0; ++i) + { + assert(points[start + i][firstSortAxis] <= axisVal); + } + + for (int i = i0; i < box.m_count; ++i) + { + assert(points[start + i][firstSortAxis] > axisVal); + } + #endif + + assert(memoryPool.size() != memoryPool.capacity() + && "memoryPool is going to be reallocated, pointers will be invalid"); + memoryPool.emplace_back(); + ConvexHullAABBTreeNode& node = memoryPool.back(); + + node.m_box[0] = box.m_min; + node.m_box[1] = box.m_max; + if (box.m_child) + { + *box.m_child = &node; + } + + if (!root) + { + root = &node; + } + + { + VHACD::Vect3 xc(0); + VHACD::Vect3 x2c(0); + VHACD::Vect3 p0(double( 1.0e15)); + VHACD::Vect3 p1(double(-1.0e15)); + for (int i = i0; i < box.m_count; ++i) + { + const VHACD::Vect3& p = points[start + i]; + xc += p; + x2c += p.CWiseMul(p); + p0 = p0.CWiseMin(p); + p1 = p1.CWiseMax(p); + } + + dTreeBox cluster_i1(box); + cluster_i1.m_start = start + i0; + cluster_i1.m_count = box.m_count - i0; + cluster_i1.m_sum = xc; + cluster_i1.m_sum2 = x2c; + cluster_i1.m_min = p0; + cluster_i1.m_max = p1; + cluster_i1.m_parent = &node; + cluster_i1.m_child = &node.m_right; + treeBoxStack[stack] = cluster_i1; + assert(cluster_i1.m_count > 0); + stack++; + } + + { + VHACD::Vect3 xc(0); + VHACD::Vect3 x2c(0); + VHACD::Vect3 p0(double( 1.0e15)); + VHACD::Vect3 p1(double(-1.0e15)); + for (int i = 0; i < i0; ++i) + { + const VHACD::Vect3& p = points[start + i]; + xc += p; + x2c += p.CWiseMul(p); + p0 = p0.CWiseMin(p); + p1 = p1.CWiseMax(p); + } + + dTreeBox cluster_i0(box); + cluster_i0.m_start = start; + cluster_i0.m_count = i0; + cluster_i0.m_min = p0; + cluster_i0.m_max = p1; + cluster_i0.m_sum = xc; + cluster_i0.m_sum2 = x2c; + cluster_i0.m_parent = &node; + cluster_i0.m_child = &node.m_left; + assert(cluster_i0.m_count > 0); + treeBoxStack[stack] = cluster_i0; + stack++; + } + } + } + + return root; +} + +int ConvexHull::SupportVertex(ConvexHullAABBTreeNode** const treePointer, + const std::vector& points, + const VHACD::Vect3& dirPlane, + const bool removeEntry) const +{ +#define VHACD_STACK_DEPTH_3D 64 + double aabbProjection[VHACD_STACK_DEPTH_3D]; + ConvexHullAABBTreeNode* stackPool[VHACD_STACK_DEPTH_3D]; + + VHACD::Vect3 dir(dirPlane); + + int index = -1; + int stack = 1; + stackPool[0] = *treePointer; + aabbProjection[0] = 
double(1.0e20); + double maxProj = double(-1.0e20); + int ix = (dir[0] > double(0.0)) ? 1 : 0; + int iy = (dir[1] > double(0.0)) ? 1 : 0; + int iz = (dir[2] > double(0.0)) ? 1 : 0; + while (stack) + { + stack--; + double boxSupportValue = aabbProjection[stack]; + if (boxSupportValue > maxProj) + { + ConvexHullAABBTreeNode* me = stackPool[stack]; + + /* + * If the node is not a leaf node... + */ + if (me->m_left && me->m_right) + { + const VHACD::Vect3 leftSupportPoint(me->m_left->m_box[ix].GetX(), + me->m_left->m_box[iy].GetY(), + me->m_left->m_box[iz].GetZ()); + double leftSupportDist = leftSupportPoint.Dot(dir); + + const VHACD::Vect3 rightSupportPoint(me->m_right->m_box[ix].GetX(), + me->m_right->m_box[iy].GetY(), + me->m_right->m_box[iz].GetZ()); + double rightSupportDist = rightSupportPoint.Dot(dir); + + /* + * ...push the shorter side first + * So we can explore the tree in the larger side first + */ + if (rightSupportDist >= leftSupportDist) + { + aabbProjection[stack] = leftSupportDist; + stackPool[stack] = me->m_left; + stack++; + assert(stack < VHACD_STACK_DEPTH_3D); + aabbProjection[stack] = rightSupportDist; + stackPool[stack] = me->m_right; + stack++; + assert(stack < VHACD_STACK_DEPTH_3D); + } + else + { + aabbProjection[stack] = rightSupportDist; + stackPool[stack] = me->m_right; + stack++; + assert(stack < VHACD_STACK_DEPTH_3D); + aabbProjection[stack] = leftSupportDist; + stackPool[stack] = me->m_left; + stack++; + assert(stack < VHACD_STACK_DEPTH_3D); + } + } + /* + * If it is a node... + */ + else + { + ConvexHullAABBTreeNode* cluster = me; + for (size_t i = 0; i < cluster->m_count; ++i) + { + const ConvexHullVertex& p = points[cluster->m_indices[i]]; + assert(p.GetX() >= cluster->m_box[0].GetX()); + assert(p.GetX() <= cluster->m_box[1].GetX()); + assert(p.GetY() >= cluster->m_box[0].GetY()); + assert(p.GetY() <= cluster->m_box[1].GetY()); + assert(p.GetZ() >= cluster->m_box[0].GetZ()); + assert(p.GetZ() <= cluster->m_box[1].GetZ()); + if (!p.m_mark) + { + //assert(p.m_w == double(0.0f)); + double dist = p.Dot(dir); + if (dist > maxProj) + { + maxProj = dist; + index = cluster->m_indices[i]; + } + } + else if (removeEntry) + { + cluster->m_indices[i] = cluster->m_indices[cluster->m_count - 1]; + cluster->m_count = cluster->m_count - 1; + i--; + } + } + + if (cluster->m_count == 0) + { + ConvexHullAABBTreeNode* const parent = cluster->m_parent; + if (parent) + { + ConvexHullAABBTreeNode* const sibling = (parent->m_left != cluster) ? 
parent->m_left : parent->m_right; + assert(sibling != cluster); + ConvexHullAABBTreeNode* const grandParent = parent->m_parent; + if (grandParent) + { + sibling->m_parent = grandParent; + if (grandParent->m_right == parent) + { + grandParent->m_right = sibling; + } + else + { + grandParent->m_left = sibling; + } + } + else + { + sibling->m_parent = nullptr; + *treePointer = sibling; + } + } + } + } + } + } + + assert(index != -1); + return index; +} + +double ConvexHull::TetrahedrumVolume(const VHACD::Vect3& p0, + const VHACD::Vect3& p1, + const VHACD::Vect3& p2, + const VHACD::Vect3& p3) const +{ + const VHACD::Vect3 p1p0(p1 - p0); + const VHACD::Vect3 p2p0(p2 - p0); + const VHACD::Vect3 p3p0(p3 - p0); + return p3p0.Dot(p1p0.Cross(p2p0)); +} + +int ConvexHull::InitVertexArray(std::vector& points, + NodeBundle& memoryPool) +// std::vector& memoryPool) +{ +#if 1 + ConvexHullAABBTreeNode* tree = BuildTreeOld(points, + memoryPool); +#else + ConvexHullAABBTreeNode* tree = BuildTreeNew(points, (char**)&memoryPool, maxMemSize); +#endif + int count = int(points.size()); + if (count < 4) + { + m_points.resize(0); + return 0; + } + + m_points.resize(count); + m_aabbP0 = tree->m_box[0]; + m_aabbP1 = tree->m_box[1]; + + VHACD::Vect3 boxSize(tree->m_box[1] - tree->m_box[0]); + m_diag = boxSize.GetNorm(); + const ndNormalMap& normalMap = ndNormalMap::GetNormalMap(); + + int index0 = SupportVertex(&tree, + points, + normalMap.m_normal[0]); + m_points[0] = points[index0]; + points[index0].m_mark = 1; + + bool validTetrahedrum = false; + VHACD::Vect3 e1(double(0.0)); + for (int i = 1; i < normalMap.m_count; ++i) + { + int index = SupportVertex(&tree, + points, + normalMap.m_normal[i]); + assert(index >= 0); + + e1 = points[index] - m_points[0]; + double error2 = e1.GetNormSquared(); + if (error2 > (double(1.0e-4) * m_diag * m_diag)) + { + m_points[1] = points[index]; + points[index].m_mark = 1; + validTetrahedrum = true; + break; + } + } + if (!validTetrahedrum) + { + m_points.resize(0); + assert(0); + return count; + } + + validTetrahedrum = false; + VHACD::Vect3 e2(double(0.0)); + VHACD::Vect3 normal(double(0.0)); + for (int i = 2; i < normalMap.m_count; ++i) + { + int index = SupportVertex(&tree, + points, + normalMap.m_normal[i]); + assert(index >= 0); + e2 = points[index] - m_points[0]; + normal = e1.Cross(e2); + double error2 = normal.GetNorm(); + if (error2 > (double(1.0e-4) * m_diag * m_diag)) + { + m_points[2] = points[index]; + points[index].m_mark = 1; + validTetrahedrum = true; + break; + } + } + + if (!validTetrahedrum) + { + m_points.resize(0); + assert(0); + return count; + } + + // find the largest possible tetrahedron + validTetrahedrum = false; + VHACD::Vect3 e3(double(0.0)); + + index0 = SupportVertex(&tree, + points, + normal); + e3 = points[index0] - m_points[0]; + double err2 = normal.Dot(e3); + if (fabs(err2) > (double(1.0e-6) * m_diag * m_diag)) + { + // we found a valid tetrahedral, about and start build the hull by adding the rest of the points + m_points[3] = points[index0]; + points[index0].m_mark = 1; + validTetrahedrum = true; + } + if (!validTetrahedrum) + { + VHACD::Vect3 n(-normal); + int index = SupportVertex(&tree, + points, + n); + e3 = points[index] - m_points[0]; + double error2 = normal.Dot(e3); + if (fabs(error2) > (double(1.0e-6) * m_diag * m_diag)) + { + // we found a valid tetrahedral, about and start build the hull by adding the rest of the points + m_points[3] = points[index]; + points[index].m_mark = 1; + validTetrahedrum = true; + } + } + if 
(!validTetrahedrum) + { + for (int i = 3; i < normalMap.m_count; ++i) + { + int index = SupportVertex(&tree, + points, + normalMap.m_normal[i]); + assert(index >= 0); + + //make sure the volume of the fist tetrahedral is no negative + e3 = points[index] - m_points[0]; + double error2 = normal.Dot(e3); + if (fabs(error2) > (double(1.0e-6) * m_diag * m_diag)) + { + // we found a valid tetrahedral, about and start build the hull by adding the rest of the points + m_points[3] = points[index]; + points[index].m_mark = 1; + validTetrahedrum = true; + break; + } + } + } + if (!validTetrahedrum) + { + // the points do not form a convex hull + m_points.resize(0); + return count; + } + + m_points.resize(4); + double volume = TetrahedrumVolume(m_points[0], + m_points[1], + m_points[2], + m_points[3]); + if (volume > double(0.0)) + { + std::swap(m_points[2], + m_points[3]); + } + assert(TetrahedrumVolume(m_points[0], m_points[1], m_points[2], m_points[3]) < double(0.0)); + return count; +} + +std::list::iterator ConvexHull::AddFace(int i0, + int i1, + int i2) +{ + ConvexHullFace face; + face.m_index[0] = i0; + face.m_index[1] = i1; + face.m_index[2] = i2; + + std::list::iterator node = m_list.emplace(m_list.end(), face); + return node; +} + +void ConvexHull::CalculateConvexHull3D(ConvexHullAABBTreeNode* vertexTree, + std::vector& points, + int count, + double distTol, + int maxVertexCount) +{ + distTol = fabs(distTol) * m_diag; + std::list::iterator f0Node = AddFace(0, 1, 2); + std::list::iterator f1Node = AddFace(0, 2, 3); + std::list::iterator f2Node = AddFace(2, 1, 3); + std::list::iterator f3Node = AddFace(1, 0, 3); + + ConvexHullFace& f0 = *f0Node; + ConvexHullFace& f1 = *f1Node; + ConvexHullFace& f2 = *f2Node; + ConvexHullFace& f3 = *f3Node; + + f0.m_twin[0] = f3Node; + f0.m_twin[1] = f2Node; + f0.m_twin[2] = f1Node; + + f1.m_twin[0] = f0Node; + f1.m_twin[1] = f2Node; + f1.m_twin[2] = f3Node; + + f2.m_twin[0] = f0Node; + f2.m_twin[1] = f3Node; + f2.m_twin[2] = f1Node; + + f3.m_twin[0] = f0Node; + f3.m_twin[1] = f1Node; + f3.m_twin[2] = f2Node; + + std::list::iterator> boundaryFaces; + boundaryFaces.push_back(f0Node); + boundaryFaces.push_back(f1Node); + boundaryFaces.push_back(f2Node); + boundaryFaces.push_back(f3Node); + + m_points.resize(count); + + count -= 4; + maxVertexCount -= 4; + int currentIndex = 4; + + /* + * Some are iterators into boundaryFaces, others into m_list + */ + std::vector::iterator> stack; + std::vector::iterator> coneList; + std::vector::iterator> deleteList; + + stack.reserve(1024 + count); + coneList.reserve(1024 + count); + deleteList.reserve(1024 + count); + + while (boundaryFaces.size() && count && (maxVertexCount > 0)) + { + // my definition of the optimal convex hull of a given vertex count, + // is the convex hull formed by a subset of the input vertex that minimizes the volume difference + // between the perfect hull formed from all input vertex and the hull of the sub set of vertex. + // When using a priority heap this algorithms will generate the an optimal of a fix vertex count. + // Since all Newton's tools do not have a limit on the point count of a convex hull, I can use either a stack or a queue. + // a stack maximize construction speed, a Queue tend to maximize the volume of the generated Hull approaching a perfect Hull. + // For now we use a queue. + // For general hulls it does not make a difference if we use a stack, queue, or a priority heap. + // perfect optimal hull only apply for when build hull of a limited vertex count. 
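+        // (In the code below the queue is simply boundaryFaces: the face to refine is taken from
+        // the back of the list while newly created faces are pushed to the front, so the oldest
+        // open faces are processed first. With no vertex limit this ordering only affects
+        // construction speed, not the resulting hull.)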
+ // + // Also when building Hulls of a limited vertex count, this function runs in constant time. + // yes that is correct, it does not makes a difference if you build a N point hull from 100 vertex + // or from 100000 vertex input array. + + // using a queue (some what slower by better hull when reduced vertex count is desired) + bool isvalid; + std::list::iterator faceNode = boundaryFaces.back(); + ConvexHullFace& face = *faceNode; + HullPlane planeEquation(face.GetPlaneEquation(m_points, isvalid)); + + int index = 0; + double dist = 0; + VHACD::Vect3 p; + if (isvalid) + { + index = SupportVertex(&vertexTree, + points, + planeEquation); + p = points[index]; + dist = planeEquation.Evalue(p); + } + + if ( isvalid + && (dist >= distTol) + && (face.Evalue(m_points, p) < double(0.0))) + { + stack.push_back(faceNode); + + deleteList.clear(); + while (stack.size()) + { + std::list::iterator node1 = stack.back(); + ConvexHullFace& face1 = *node1; + + stack.pop_back(); + + if (!face1.m_mark && (face1.Evalue(m_points, p) < double(0.0))) + { + #ifdef _DEBUG + for (const auto node : deleteList) + { + assert(node != node1); + } + #endif + + deleteList.push_back(node1); + face1.m_mark = 1; + for (std::list::iterator& twinNode : face1.m_twin) + { + ConvexHullFace& twinFace = *twinNode; + if (!twinFace.m_mark) + { + stack.push_back(twinNode); + } + } + } + } + + m_points[currentIndex] = points[index]; + points[index].m_mark = 1; + + coneList.clear(); + for (std::list::iterator node1 : deleteList) + { + ConvexHullFace& face1 = *node1; + assert(face1.m_mark == 1); + for (std::size_t j0 = 0; j0 < face1.m_twin.size(); ++j0) + { + std::list::iterator twinNode = face1.m_twin[j0]; + ConvexHullFace& twinFace = *twinNode; + if (!twinFace.m_mark) + { + std::size_t j1 = (j0 == 2) ? 
0 : j0 + 1; + std::list::iterator newNode = AddFace(currentIndex, + face1.m_index[j0], + face1.m_index[j1]); + boundaryFaces.push_front(newNode); + ConvexHullFace& newFace = *newNode; + + newFace.m_twin[1] = twinNode; + for (std::size_t k = 0; k < twinFace.m_twin.size(); ++k) + { + if (twinFace.m_twin[k] == node1) + { + twinFace.m_twin[k] = newNode; + } + } + coneList.push_back(newNode); + } + } + } + + for (std::size_t i = 0; i < coneList.size() - 1; ++i) + { + std::list::iterator nodeA = coneList[i]; + ConvexHullFace& faceA = *nodeA; + assert(faceA.m_mark == 0); + for (std::size_t j = i + 1; j < coneList.size(); j++) + { + std::list::iterator nodeB = coneList[j]; + ConvexHullFace& faceB = *nodeB; + assert(faceB.m_mark == 0); + if (faceA.m_index[2] == faceB.m_index[1]) + { + faceA.m_twin[2] = nodeB; + faceB.m_twin[0] = nodeA; + break; + } + } + + for (std::size_t j = i + 1; j < coneList.size(); j++) + { + std::list::iterator nodeB = coneList[j]; + ConvexHullFace& faceB = *nodeB; + assert(faceB.m_mark == 0); + if (faceA.m_index[1] == faceB.m_index[2]) + { + faceA.m_twin[0] = nodeB; + faceB.m_twin[2] = nodeA; + break; + } + } + } + + for (std::list::iterator node : deleteList) + { + auto it = std::find(boundaryFaces.begin(), + boundaryFaces.end(), + node); + if (it != boundaryFaces.end()) + { + boundaryFaces.erase(it); + } + m_list.erase(node); + } + + maxVertexCount--; + currentIndex++; + count--; + } + else + { + auto it = std::find(boundaryFaces.begin(), + boundaryFaces.end(), + faceNode); + if (it != boundaryFaces.end()) + { + boundaryFaces.erase(it); + } + } + } + m_points.resize(currentIndex); +} + +//*********************************************************************************************** +// End of ConvexHull generation code by Julio Jerez +//*********************************************************************************************** + +class KdTreeNode; + +enum Axes +{ + X_AXIS = 0, + Y_AXIS = 1, + Z_AXIS = 2 +}; + +class KdTreeFindNode +{ +public: + KdTreeFindNode() = default; + + KdTreeNode* m_node{ nullptr }; + double m_distance{ 0.0 }; +}; + +class KdTree +{ +public: + KdTree() = default; + + const VHACD::Vertex& GetPosition(uint32_t index) const; + + uint32_t Search(const VHACD::Vect3& pos, + double radius, + uint32_t maxObjects, + KdTreeFindNode* found) const; + + uint32_t Add(const VHACD::Vertex& v); + + KdTreeNode& GetNewNode(uint32_t index); + + uint32_t GetNearest(const VHACD::Vect3& pos, + double radius, + bool& _found) const; // returns the nearest possible neighbor's index. 
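+    // Typical use (as done by VertexIndex::GetIndex() further below): query GetNearest() with a
+    // small search radius and, only when no existing vertex is found within that radius, Add()
+    // the new vertex and use the index it returns.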
+ + const std::vector& GetVertices() const; + std::vector&& TakeVertices(); + + uint32_t GetVCount() const; + +private: + KdTreeNode* m_root{ nullptr }; + NodeBundle m_bundle; + + std::vector m_vertices; +}; + +class KdTreeNode +{ +public: + KdTreeNode() = default; + KdTreeNode(uint32_t index); + + void Add(KdTreeNode& node, + Axes dim, + const KdTree& iface); + + uint32_t GetIndex() const; + + void Search(Axes axis, + const VHACD::Vect3& pos, + double radius, + uint32_t& count, + uint32_t maxObjects, + KdTreeFindNode* found, + const KdTree& iface); + +private: + uint32_t m_index = 0; + KdTreeNode* m_left = nullptr; + KdTreeNode* m_right = nullptr; +}; + +const VHACD::Vertex& KdTree::GetPosition(uint32_t index) const +{ + assert(index < m_vertices.size()); + return m_vertices[index]; +} + +uint32_t KdTree::Search(const VHACD::Vect3& pos, + double radius, + uint32_t maxObjects, + KdTreeFindNode* found) const +{ + if (!m_root) + return 0; + uint32_t count = 0; + m_root->Search(X_AXIS, pos, radius, count, maxObjects, found, *this); + return count; +} + +uint32_t KdTree::Add(const VHACD::Vertex& v) +{ + uint32_t ret = uint32_t(m_vertices.size()); + m_vertices.emplace_back(v); + KdTreeNode& node = GetNewNode(ret); + if (m_root) + { + m_root->Add(node, + X_AXIS, + *this); + } + else + { + m_root = &node; + } + return ret; +} + +KdTreeNode& KdTree::GetNewNode(uint32_t index) +{ + KdTreeNode& node = m_bundle.GetNextNode(); + node = KdTreeNode(index); + return node; +} + +uint32_t KdTree::GetNearest(const VHACD::Vect3& pos, + double radius, + bool& _found) const // returns the nearest possible neighbor's index. +{ + uint32_t ret = 0; + + _found = false; + KdTreeFindNode found; + uint32_t count = Search(pos, radius, 1, &found); + if (count) + { + KdTreeNode* node = found.m_node; + ret = node->GetIndex(); + _found = true; + } + return ret; +} + +const std::vector& KdTree::GetVertices() const +{ + return m_vertices; +} + +std::vector&& KdTree::TakeVertices() +{ + return std::move(m_vertices); +} + +uint32_t KdTree::GetVCount() const +{ + return uint32_t(m_vertices.size()); +} + +KdTreeNode::KdTreeNode(uint32_t index) + : m_index(index) +{ +} + +void KdTreeNode::Add(KdTreeNode& node, + Axes dim, + const KdTree& tree) +{ + Axes axis = X_AXIS; + uint32_t idx = 0; + switch (dim) + { + case X_AXIS: + idx = 0; + axis = Y_AXIS; + break; + case Y_AXIS: + idx = 1; + axis = Z_AXIS; + break; + case Z_AXIS: + idx = 2; + axis = X_AXIS; + break; + } + + const VHACD::Vertex& nodePosition = tree.GetPosition(node.m_index); + const VHACD::Vertex& position = tree.GetPosition(m_index); + if (nodePosition[idx] <= position[idx]) + { + if (m_left) + m_left->Add(node, axis, tree); + else + m_left = &node; + } + else + { + if (m_right) + m_right->Add(node, axis, tree); + else + m_right = &node; + } +} + +uint32_t KdTreeNode::GetIndex() const +{ + return m_index; +} + +void KdTreeNode::Search(Axes axis, + const VHACD::Vect3& pos, + double radius, + uint32_t& count, + uint32_t maxObjects, + KdTreeFindNode* found, + const KdTree& iface) +{ + const VHACD::Vect3 position = iface.GetPosition(m_index); + + const VHACD::Vect3 d = pos - position; + + KdTreeNode* search1 = 0; + KdTreeNode* search2 = 0; + + uint32_t idx = 0; + switch (axis) + { + case X_AXIS: + idx = 0; + axis = Y_AXIS; + break; + case Y_AXIS: + idx = 1; + axis = Z_AXIS; + break; + case Z_AXIS: + idx = 2; + axis = X_AXIS; + break; + } + + if (d[idx] <= 0) // JWR if we are to the left + { + search1 = m_left; // JWR then search to the left + if (-d[idx] < radius) // JWR 
if distance to the right is less than our search radius, continue on the right + // as well. + search2 = m_right; + } + else + { + search1 = m_right; // JWR ok, we go down the left tree + if (d[idx] < radius) // JWR if the distance from the right is less than our search radius + search2 = m_left; + } + + double r2 = radius * radius; + double m = d.GetNormSquared(); + + if (m < r2) + { + switch (count) + { + case 0: + { + found[count].m_node = this; + found[count].m_distance = m; + break; + } + case 1: + { + if (m < found[0].m_distance) + { + if (maxObjects == 1) + { + found[0].m_node = this; + found[0].m_distance = m; + } + else + { + found[1] = found[0]; + found[0].m_node = this; + found[0].m_distance = m; + } + } + else if (maxObjects > 1) + { + found[1].m_node = this; + found[1].m_distance = m; + } + break; + } + default: + { + bool inserted = false; + + for (uint32_t i = 0; i < count; i++) + { + if (m < found[i].m_distance) // if this one is closer than a pre-existing one... + { + // insertion sort... + uint32_t scan = count; + if (scan >= maxObjects) + scan = maxObjects - 1; + for (uint32_t j = scan; j > i; j--) + { + found[j] = found[j - 1]; + } + found[i].m_node = this; + found[i].m_distance = m; + inserted = true; + break; + } + } + + if (!inserted && count < maxObjects) + { + found[count].m_node = this; + found[count].m_distance = m; + } + } + break; + } + + count++; + + if (count > maxObjects) + { + count = maxObjects; + } + } + + + if (search1) + search1->Search(axis, pos, radius, count, maxObjects, found, iface); + + if (search2) + search2->Search(axis, pos, radius, count, maxObjects, found, iface); +} + +class VertexIndex +{ +public: + VertexIndex(double granularity, + bool snapToGrid); + + VHACD::Vect3 SnapToGrid(VHACD::Vect3 p); + + uint32_t GetIndex(VHACD::Vect3 p, + bool& newPos); + + const std::vector& GetVertices() const; + + std::vector&& TakeVertices(); + + uint32_t GetVCount() const; + + bool SaveAsObj(const char* fname, + uint32_t tcount, + uint32_t* indices) + { + bool ret = false; + + FILE* fph = fopen(fname, "wb"); + if (fph) + { + ret = true; + + const std::vector& v = GetVertices(); + for (uint32_t i = 0; i < v.size(); ++i) + { + fprintf(fph, "v %0.9f %0.9f %0.9f\r\n", + v[i].mX, + v[i].mY, + v[i].mZ); + } + + for (uint32_t i = 0; i < tcount; i++) + { + uint32_t i1 = *indices++; + uint32_t i2 = *indices++; + uint32_t i3 = *indices++; + fprintf(fph, "f %d %d %d\r\n", + i1 + 1, + i2 + 1, + i3 + 1); + } + fclose(fph); + } + + return ret; + } + +private: + bool m_snapToGrid : 1; + double m_granularity; + KdTree m_KdTree; +}; + +VertexIndex::VertexIndex(double granularity, + bool snapToGrid) + : m_snapToGrid(snapToGrid) + , m_granularity(granularity) +{ +} + +VHACD::Vect3 VertexIndex::SnapToGrid(VHACD::Vect3 p) +{ + for (int i = 0; i < 3; ++i) + { + double m = fmod(p[i], m_granularity); + p[i] -= m; + } + return p; +} + +uint32_t VertexIndex::GetIndex(VHACD::Vect3 p, + bool& newPos) +{ + uint32_t ret; + + newPos = false; + + if (m_snapToGrid) + { + p = SnapToGrid(p); + } + + bool found; + ret = m_KdTree.GetNearest(p, m_granularity, found); + if (!found) + { + newPos = true; + ret = m_KdTree.Add(VHACD::Vertex(p.GetX(), p.GetY(), p.GetZ())); + } + + return ret; +} + +const std::vector& VertexIndex::GetVertices() const +{ + return m_KdTree.GetVertices(); +} + +std::vector&& VertexIndex::TakeVertices() +{ + return std::move(m_KdTree.TakeVertices()); +} + +uint32_t VertexIndex::GetVCount() const +{ + return m_KdTree.GetVCount(); +} + +/* + * A wrapper class for 3 10 bit 
integers packed into a 32 bit integer + * Layout is [PAD][X][Y][Z] + * Pad is bits 31-30, X is 29-20, Y is 19-10, and Z is 9-0 + */ +class Voxel +{ + /* + * Specify all of them for consistency + */ + static constexpr int VoxelBitsZStart = 0; + static constexpr int VoxelBitsYStart = 10; + static constexpr int VoxelBitsXStart = 20; + static constexpr int VoxelBitMask = 0x03FF; // bits 0 through 9 inclusive +public: + Voxel() = default; + + Voxel(uint32_t index); + + Voxel(uint32_t x, + uint32_t y, + uint32_t z); + + bool operator==(const Voxel &v) const; + + VHACD::Vector3 GetVoxel() const; + + uint32_t GetX() const; + uint32_t GetY() const; + uint32_t GetZ() const; + + uint32_t GetVoxelAddress() const; + +private: + uint32_t m_voxel{ 0 }; +}; + +Voxel::Voxel(uint32_t index) + : m_voxel(index) +{ +} + +Voxel::Voxel(uint32_t x, + uint32_t y, + uint32_t z) + : m_voxel((x << VoxelBitsXStart) | (y << VoxelBitsYStart) | (z << VoxelBitsZStart)) +{ + assert(x < 1024 && "Voxel constructed with X outside of range"); + assert(y < 1024 && "Voxel constructed with Y outside of range"); + assert(z < 1024 && "Voxel constructed with Z outside of range"); +} + +bool Voxel::operator==(const Voxel& v) const +{ + return m_voxel == v.m_voxel; +} + +VHACD::Vector3 Voxel::GetVoxel() const +{ + return VHACD::Vector3(GetX(), GetY(), GetZ()); +} + +uint32_t Voxel::GetX() const +{ + return (m_voxel >> VoxelBitsXStart) & VoxelBitMask; +} + +uint32_t Voxel::GetY() const +{ + return (m_voxel >> VoxelBitsYStart) & VoxelBitMask; +} + +uint32_t Voxel::GetZ() const +{ + return (m_voxel >> VoxelBitsZStart) & VoxelBitMask; +} + +uint32_t Voxel::GetVoxelAddress() const +{ + return m_voxel; +} + +struct SimpleMesh +{ + std::vector m_vertices; + std::vector m_indices; +}; + +/*======================== 0-tests ========================*/ +inline bool IntersectRayAABB(const VHACD::Vect3& start, + const VHACD::Vect3& dir, + const VHACD::BoundsAABB& bounds, + double& t) +{ + //! calculate candidate plane on each axis + bool inside = true; + VHACD::Vect3 ta(double(-1.0)); + + //! use unrolled loops + for (uint32_t i = 0; i < 3; ++i) + { + if (start[i] < bounds.GetMin()[i]) + { + if (dir[i] != double(0.0)) + ta[i] = (bounds.GetMin()[i] - start[i]) / dir[i]; + inside = false; + } + else if (start[i] > bounds.GetMax()[i]) + { + if (dir[i] != double(0.0)) + ta[i] = (bounds.GetMax()[i] - start[i]) / dir[i]; + inside = false; + } + } + + //! if point inside all planes + if (inside) + { + t = double(0.0); + return true; + } + + //! we now have t values for each of possible intersection planes + //! find the maximum to get the intersection point + uint32_t taxis; + double tmax = ta.MaxCoeff(taxis); + + if (tmax < double(0.0)) + return false; + + //! check that the intersection point lies on the plane we picked + //! we don't test the axis of closest intersection for precision reasons + + //! no eps for now + double eps = double(0.0); + + VHACD::Vect3 hit = start + dir * tmax; + + if (( hit.GetX() < bounds.GetMin().GetX() - eps + || hit.GetX() > bounds.GetMax().GetX() + eps) + && taxis != 0) + return false; + if (( hit.GetY() < bounds.GetMin().GetY() - eps + || hit.GetY() > bounds.GetMax().GetY() + eps) + && taxis != 1) + return false; + if (( hit.GetZ() < bounds.GetMin().GetZ() - eps + || hit.GetZ() > bounds.GetMax().GetZ() + eps) + && taxis != 2) + return false; + + //! 
output results + t = tmax; + + return true; +} + +// Moller and Trumbore's method +inline bool IntersectRayTriTwoSided(const VHACD::Vect3& p, + const VHACD::Vect3& dir, + const VHACD::Vect3& a, + const VHACD::Vect3& b, + const VHACD::Vect3& c, + double& t, + double& u, + double& v, + double& w, + double& sign, + VHACD::Vect3* normal) +{ + VHACD::Vect3 ab = b - a; + VHACD::Vect3 ac = c - a; + VHACD::Vect3 n = ab.Cross(ac); + + double d = -dir.Dot(n); + double ood = double(1.0) / d; // No need to check for division by zero here as infinity arithmetic will save us... + VHACD::Vect3 ap = p - a; + + t = ap.Dot(n) * ood; + if (t < double(0.0)) + { + return false; + } + + VHACD::Vect3 e = -dir.Cross(ap); + v = ac.Dot(e) * ood; + if (v < double(0.0) || v > double(1.0)) // ...here... + { + return false; + } + w = -ab.Dot(e) * ood; + if (w < double(0.0) || v + w > double(1.0)) // ...and here + { + return false; + } + + u = double(1.0) - v - w; + if (normal) + { + *normal = n; + } + + sign = d; + + return true; +} + +// RTCD 5.1.5, page 142 +inline VHACD::Vect3 ClosestPointOnTriangle(const VHACD::Vect3& a, + const VHACD::Vect3& b, + const VHACD::Vect3& c, + const VHACD::Vect3& p, + double& v, + double& w) +{ + VHACD::Vect3 ab = b - a; + VHACD::Vect3 ac = c - a; + VHACD::Vect3 ap = p - a; + + double d1 = ab.Dot(ap); + double d2 = ac.Dot(ap); + if ( d1 <= double(0.0) + && d2 <= double(0.0)) + { + v = double(0.0); + w = double(0.0); + return a; + } + + VHACD::Vect3 bp = p - b; + double d3 = ab.Dot(bp); + double d4 = ac.Dot(bp); + if ( d3 >= double(0.0) + && d4 <= d3) + { + v = double(1.0); + w = double(0.0); + return b; + } + + double vc = d1 * d4 - d3 * d2; + if ( vc <= double(0.0) + && d1 >= double(0.0) + && d3 <= double(0.0)) + { + v = d1 / (d1 - d3); + w = double(0.0); + return a + v * ab; + } + + VHACD::Vect3 cp = p - c; + double d5 = ab.Dot(cp); + double d6 = ac.Dot(cp); + if (d6 >= double(0.0) && d5 <= d6) + { + v = double(0.0); + w = double(1.0); + return c; + } + + double vb = d5 * d2 - d1 * d6; + if ( vb <= double(0.0) + && d2 >= double(0.0) + && d6 <= double(0.0)) + { + v = double(0.0); + w = d2 / (d2 - d6); + return a + w * ac; + } + + double va = d3 * d6 - d5 * d4; + if ( va <= double(0.0) + && (d4 - d3) >= double(0.0) + && (d5 - d6) >= double(0.0)) + { + w = (d4 - d3) / ((d4 - d3) + (d5 - d6)); + v = double(1.0) - w; + return b + w * (c - b); + } + + double denom = double(1.0) / (va + vb + vc); + v = vb * denom; + w = vc * denom; + return a + ab * v + ac * w; +} + +class AABBTree +{ +public: + AABBTree() = default; + AABBTree(AABBTree&&) = default; + AABBTree& operator=(AABBTree&&) = default; + + AABBTree(const std::vector& vertices, + const std::vector& indices); + + bool TraceRay(const VHACD::Vect3& start, + const VHACD::Vect3& to, + double& outT, + double& faceSign, + VHACD::Vect3& hitLocation) const; + + bool TraceRay(const VHACD::Vect3& start, + const VHACD::Vect3& dir, + uint32_t& insideCount, + uint32_t& outsideCount) const; + + bool TraceRay(const VHACD::Vect3& start, + const VHACD::Vect3& dir, + double& outT, + double& u, + double& v, + double& w, + double& faceSign, + uint32_t& faceIndex) const; + + VHACD::Vect3 GetCenter() const; + VHACD::Vect3 GetMinExtents() const; + VHACD::Vect3 GetMaxExtents() const; + + bool GetClosestPointWithinDistance(const VHACD::Vect3& point, + double maxDistance, + VHACD::Vect3& closestPoint) const; + +private: + struct Node + { + union + { + uint32_t m_children; + uint32_t m_numFaces{ 0 }; + }; + + uint32_t* m_faces{ nullptr }; + VHACD::BoundsAABB 
m_extents; + }; + + struct FaceSorter + { + FaceSorter(const std::vector& positions, + const std::vector& indices, + uint32_t axis); + + bool operator()(uint32_t lhs, uint32_t rhs) const; + + double GetCentroid(uint32_t face) const; + + const std::vector& m_vertices; + const std::vector& m_indices; + uint32_t m_axis; + }; + + // partition the objects and return the number of objects in the lower partition + uint32_t PartitionMedian(Node& n, + uint32_t* faces, + uint32_t numFaces); + uint32_t PartitionSAH(Node& n, + uint32_t* faces, + uint32_t numFaces); + + void Build(); + + void BuildRecursive(uint32_t nodeIndex, + uint32_t* faces, + uint32_t numFaces); + + void TraceRecursive(uint32_t nodeIndex, + const VHACD::Vect3& start, + const VHACD::Vect3& dir, + double& outT, + double& u, + double& v, + double& w, + double& faceSign, + uint32_t& faceIndex) const; + + + bool GetClosestPointWithinDistance(const VHACD::Vect3& point, + const double maxDis, + double& dis, + double& v, + double& w, + uint32_t& faceIndex, + VHACD::Vect3& closest) const; + + void GetClosestPointWithinDistanceSqRecursive(uint32_t nodeIndex, + const VHACD::Vect3& point, + double& outDisSq, + double& outV, + double& outW, + uint32_t& outFaceIndex, + VHACD::Vect3& closest) const; + + VHACD::BoundsAABB CalculateFaceBounds(uint32_t* faces, + uint32_t numFaces); + + // track the next free node + uint32_t m_freeNode; + + const std::vector* m_vertices{ nullptr }; + const std::vector* m_indices{ nullptr }; + + std::vector m_faces; + std::vector m_nodes; + std::vector m_faceBounds; + + // stats + uint32_t m_treeDepth{ 0 }; + uint32_t m_innerNodes{ 0 }; + uint32_t m_leafNodes{ 0 }; + + uint32_t s_depth{ 0 }; +}; + +AABBTree::FaceSorter::FaceSorter(const std::vector& positions, + const std::vector& indices, + uint32_t axis) + : m_vertices(positions) + , m_indices(indices) + , m_axis(axis) +{ +} + +inline bool AABBTree::FaceSorter::operator()(uint32_t lhs, + uint32_t rhs) const +{ + double a = GetCentroid(lhs); + double b = GetCentroid(rhs); + + if (a == b) + { + return lhs < rhs; + } + else + { + return a < b; + } +} + +inline double AABBTree::FaceSorter::GetCentroid(uint32_t face) const +{ + const VHACD::Vect3& a = m_vertices[m_indices[face].mI0]; + const VHACD::Vect3& b = m_vertices[m_indices[face].mI1]; + const VHACD::Vect3& c = m_vertices[m_indices[face].mI2]; + + return (a[m_axis] + b[m_axis] + c[m_axis]) / double(3.0); +} + +AABBTree::AABBTree(const std::vector& vertices, + const std::vector& indices) + : m_vertices(&vertices) + , m_indices(&indices) +{ + Build(); +} + +bool AABBTree::TraceRay(const VHACD::Vect3& start, + const VHACD::Vect3& to, + double& outT, + double& faceSign, + VHACD::Vect3& hitLocation) const +{ + VHACD::Vect3 dir = to - start; + double distance = dir.Normalize(); + double u, v, w; + uint32_t faceIndex; + bool hit = TraceRay(start, + dir, + outT, + u, + v, + w, + faceSign, + faceIndex); + if (hit) + { + hitLocation = start + dir * outT; + } + + if (hit && outT > distance) + { + hit = false; + } + return hit; +} + +bool AABBTree::TraceRay(const VHACD::Vect3& start, + const VHACD::Vect3& dir, + uint32_t& insideCount, + uint32_t& outsideCount) const +{ + double outT, u, v, w, faceSign; + uint32_t faceIndex; + bool hit = TraceRay(start, + dir, + outT, + u, + v, + w, + faceSign, + faceIndex); + if (hit) + { + if (faceSign >= 0) + { + insideCount++; + } + else + { + outsideCount++; + } + } + return hit; +} + +bool AABBTree::TraceRay(const VHACD::Vect3& start, + const VHACD::Vect3& dir, + double& outT, + double& 
u, + double& v, + double& w, + double& faceSign, + uint32_t& faceIndex) const +{ + outT = FLT_MAX; + TraceRecursive(0, + start, + dir, + outT, + u, + v, + w, + faceSign, + faceIndex); + return (outT != FLT_MAX); +} + +VHACD::Vect3 AABBTree::GetCenter() const +{ + return m_nodes[0].m_extents.GetCenter(); +} + +VHACD::Vect3 AABBTree::GetMinExtents() const +{ + return m_nodes[0].m_extents.GetMin(); +} + +VHACD::Vect3 AABBTree::GetMaxExtents() const +{ + return m_nodes[0].m_extents.GetMax(); +} + +bool AABBTree::GetClosestPointWithinDistance(const VHACD::Vect3& point, + double maxDistance, + VHACD::Vect3& closestPoint) const +{ + double dis, v, w; + uint32_t faceIndex; + bool hit = GetClosestPointWithinDistance(point, + maxDistance, + dis, + v, + w, + faceIndex, + closestPoint); + return hit; +} + +// partition faces around the median face +uint32_t AABBTree::PartitionMedian(Node& n, + uint32_t* faces, + uint32_t numFaces) +{ + FaceSorter predicate(*m_vertices, + *m_indices, + n.m_extents.GetSize().LongestAxis()); + std::nth_element(faces, + faces + numFaces / 2, + faces + numFaces, + predicate); + + return numFaces / 2; +} + +// partition faces based on the surface area heuristic +uint32_t AABBTree::PartitionSAH(Node&, + uint32_t* faces, + uint32_t numFaces) +{ + uint32_t bestAxis = 0; + uint32_t bestIndex = 0; + double bestCost = FLT_MAX; + + for (uint32_t a = 0; a < 3; ++a) + { + // sort faces by centroids + FaceSorter predicate(*m_vertices, + *m_indices, + a); + std::sort(faces, + faces + numFaces, + predicate); + + // two passes over data to calculate upper and lower bounds + std::vector cumulativeLower(numFaces); + std::vector cumulativeUpper(numFaces); + + VHACD::BoundsAABB lower; + VHACD::BoundsAABB upper; + + for (uint32_t i = 0; i < numFaces; ++i) + { + lower.Union(m_faceBounds[faces[i]]); + upper.Union(m_faceBounds[faces[numFaces - i - 1]]); + + cumulativeLower[i] = lower.SurfaceArea(); + cumulativeUpper[numFaces - i - 1] = upper.SurfaceArea(); + } + + double invTotalSA = double(1.0) / cumulativeUpper[0]; + + // test all split positions + for (uint32_t i = 0; i < numFaces - 1; ++i) + { + double pBelow = cumulativeLower[i] * invTotalSA; + double pAbove = cumulativeUpper[i] * invTotalSA; + + double cost = double(0.125) + (pBelow * i + pAbove * (numFaces - i)); + if (cost <= bestCost) + { + bestCost = cost; + bestIndex = i; + bestAxis = a; + } + } + } + + // re-sort by best axis + FaceSorter predicate(*m_vertices, + *m_indices, + bestAxis); + std::sort(faces, + faces + numFaces, + predicate); + + return bestIndex + 1; +} + +void AABBTree::Build() +{ + const uint32_t numFaces = uint32_t(m_indices->size()); + + // build initial list of faces + m_faces.reserve(numFaces); + + // calculate bounds of each face and store + m_faceBounds.reserve(numFaces); + + std::vector stack; + for (uint32_t i = 0; i < numFaces; ++i) + { + VHACD::BoundsAABB top = CalculateFaceBounds(&i, + 1); + + m_faces.push_back(i); + m_faceBounds.push_back(top); + } + + m_nodes.reserve(uint32_t(numFaces * double(1.5))); + + // allocate space for all the nodes + m_freeNode = 1; + + // start building + BuildRecursive(0, + m_faces.data(), + numFaces); + + assert(s_depth == 0); +} + +void AABBTree::BuildRecursive(uint32_t nodeIndex, + uint32_t* faces, + uint32_t numFaces) +{ + const uint32_t kMaxFacesPerLeaf = 6; + + // if we've run out of nodes allocate some more + if (nodeIndex >= m_nodes.size()) + { + uint32_t s = std::max(uint32_t(double(1.5) * m_nodes.size()), 512U); + m_nodes.resize(s); + } + + // a reference to the 
current node, need to be careful here as this reference may become invalid if array is resized + Node& n = m_nodes[nodeIndex]; + + // track max tree depth + ++s_depth; + m_treeDepth = std::max(m_treeDepth, s_depth); + + n.m_extents = CalculateFaceBounds(faces, + numFaces); + + // calculate bounds of faces and add node + if (numFaces <= kMaxFacesPerLeaf) + { + n.m_faces = faces; + n.m_numFaces = numFaces; + + ++m_leafNodes; + } + else + { + ++m_innerNodes; + + // face counts for each branch + const uint32_t leftCount = PartitionMedian(n, faces, numFaces); + // const uint32_t leftCount = PartitionSAH(n, faces, numFaces); + const uint32_t rightCount = numFaces - leftCount; + + // alloc 2 nodes + m_nodes[nodeIndex].m_children = m_freeNode; + + // allocate two nodes + m_freeNode += 2; + + // split faces in half and build each side recursively + BuildRecursive(m_nodes[nodeIndex].m_children + 0, faces, leftCount); + BuildRecursive(m_nodes[nodeIndex].m_children + 1, faces + leftCount, rightCount); + } + + --s_depth; +} + +void AABBTree::TraceRecursive(uint32_t nodeIndex, + const VHACD::Vect3& start, + const VHACD::Vect3& dir, + double& outT, + double& outU, + double& outV, + double& outW, + double& faceSign, + uint32_t& faceIndex) const +{ + const Node& node = m_nodes[nodeIndex]; + + if (node.m_faces == NULL) + { + // find closest node + const Node& leftChild = m_nodes[node.m_children + 0]; + const Node& rightChild = m_nodes[node.m_children + 1]; + + double dist[2] = { FLT_MAX, FLT_MAX }; + + IntersectRayAABB(start, + dir, + leftChild.m_extents, + dist[0]); + IntersectRayAABB(start, + dir, + rightChild.m_extents, + dist[1]); + + uint32_t closest = 0; + uint32_t furthest = 1; + + if (dist[1] < dist[0]) + { + closest = 1; + furthest = 0; + } + + if (dist[closest] < outT) + { + TraceRecursive(node.m_children + closest, + start, + dir, + outT, + outU, + outV, + outW, + faceSign, + faceIndex); + } + + if (dist[furthest] < outT) + { + TraceRecursive(node.m_children + furthest, + start, + dir, + outT, + outU, + outV, + outW, + faceSign, + faceIndex); + } + } + else + { + double t, u, v, w, s; + + for (uint32_t i = 0; i < node.m_numFaces; ++i) + { + uint32_t indexStart = node.m_faces[i]; + + const VHACD::Vect3& a = (*m_vertices)[(*m_indices)[indexStart].mI0]; + const VHACD::Vect3& b = (*m_vertices)[(*m_indices)[indexStart].mI1]; + const VHACD::Vect3& c = (*m_vertices)[(*m_indices)[indexStart].mI2]; + if (IntersectRayTriTwoSided(start, dir, a, b, c, t, u, v, w, s, NULL)) + { + if (t < outT) + { + outT = t; + outU = u; + outV = v; + outW = w; + faceSign = s; + faceIndex = node.m_faces[i]; + } + } + } + } +} + +bool AABBTree::GetClosestPointWithinDistance(const VHACD::Vect3& point, + const double maxDis, + double& dis, + double& v, + double& w, + uint32_t& faceIndex, + VHACD::Vect3& closest) const +{ + dis = maxDis; + faceIndex = uint32_t(~0); + double disSq = dis * dis; + + GetClosestPointWithinDistanceSqRecursive(0, + point, + disSq, + v, + w, + faceIndex, + closest); + dis = sqrt(disSq); + + return (faceIndex < (~(static_cast(0)))); +} + +void AABBTree::GetClosestPointWithinDistanceSqRecursive(uint32_t nodeIndex, + const VHACD::Vect3& point, + double& outDisSq, + double& outV, + double& outW, + uint32_t& outFaceIndex, + VHACD::Vect3& closestPoint) const +{ + const Node& node = m_nodes[nodeIndex]; + + if (node.m_faces == nullptr) + { + // find closest node + const Node& leftChild = m_nodes[node.m_children + 0]; + const Node& rightChild = m_nodes[node.m_children + 1]; + + // double dist[2] = { FLT_MAX, 
FLT_MAX }; + VHACD::Vect3 lp = leftChild.m_extents.ClosestPoint(point); + VHACD::Vect3 rp = rightChild.m_extents.ClosestPoint(point); + + + uint32_t closest = 0; + uint32_t furthest = 1; + double dcSq = (point - lp).GetNormSquared(); + double dfSq = (point - rp).GetNormSquared(); + if (dfSq < dcSq) + { + closest = 1; + furthest = 0; + std::swap(dfSq, dcSq); + } + + if (dcSq < outDisSq) + { + GetClosestPointWithinDistanceSqRecursive(node.m_children + closest, + point, + outDisSq, + outV, + outW, + outFaceIndex, + closestPoint); + } + + if (dfSq < outDisSq) + { + GetClosestPointWithinDistanceSqRecursive(node.m_children + furthest, + point, + outDisSq, + outV, + outW, + outFaceIndex, + closestPoint); + } + } + else + { + + double v, w; + for (uint32_t i = 0; i < node.m_numFaces; ++i) + { + uint32_t indexStart = node.m_faces[i]; + + const VHACD::Vect3& a = (*m_vertices)[(*m_indices)[indexStart].mI0]; + const VHACD::Vect3& b = (*m_vertices)[(*m_indices)[indexStart].mI1]; + const VHACD::Vect3& c = (*m_vertices)[(*m_indices)[indexStart].mI2]; + + VHACD::Vect3 cp = ClosestPointOnTriangle(a, b, c, point, v, w); + double disSq = (cp - point).GetNormSquared(); + + if (disSq < outDisSq) + { + closestPoint = cp; + outDisSq = disSq; + outV = v; + outW = w; + outFaceIndex = node.m_faces[i]; + } + } + } +} + +VHACD::BoundsAABB AABBTree::CalculateFaceBounds(uint32_t* faces, + uint32_t numFaces) +{ + VHACD::Vect3 minExtents( FLT_MAX); + VHACD::Vect3 maxExtents(-FLT_MAX); + + // calculate face bounds + for (uint32_t i = 0; i < numFaces; ++i) + { + VHACD::Vect3 a = (*m_vertices)[(*m_indices)[faces[i]].mI0]; + VHACD::Vect3 b = (*m_vertices)[(*m_indices)[faces[i]].mI1]; + VHACD::Vect3 c = (*m_vertices)[(*m_indices)[faces[i]].mI2]; + + minExtents = a.CWiseMin(minExtents); + maxExtents = a.CWiseMax(maxExtents); + + minExtents = b.CWiseMin(minExtents); + maxExtents = b.CWiseMax(maxExtents); + + minExtents = c.CWiseMin(minExtents); + maxExtents = c.CWiseMax(maxExtents); + } + + return VHACD::BoundsAABB(minExtents, + maxExtents); +} + +enum class VoxelValue : uint8_t +{ + PRIMITIVE_UNDEFINED = 0, + PRIMITIVE_OUTSIDE_SURFACE_TOWALK = 1, + PRIMITIVE_OUTSIDE_SURFACE = 2, + PRIMITIVE_INSIDE_SURFACE = 3, + PRIMITIVE_ON_SURFACE = 4 +}; + +class Volume +{ +public: + void Voxelize(const std::vector& points, + const std::vector& triangles, + const size_t dim, + FillMode fillMode, + const AABBTree& aabbTree); + + void RaycastFill(const AABBTree& aabbTree); + + void SetVoxel(const size_t i, + const size_t j, + const size_t k, + VoxelValue value); + + VoxelValue& GetVoxel(const size_t i, + const size_t j, + const size_t k); + + const VoxelValue& GetVoxel(const size_t i, + const size_t j, + const size_t k) const; + + const std::vector& GetSurfaceVoxels() const; + const std::vector& GetInteriorVoxels() const; + + double GetScale() const; + const VHACD::BoundsAABB& GetBounds() const; + const VHACD::Vector3& GetDimensions() const; + + VHACD::BoundsAABB m_bounds; + double m_scale{ 1.0 }; + VHACD::Vector3 m_dim{ 0 }; + size_t m_numVoxelsOnSurface{ 0 }; + size_t m_numVoxelsInsideSurface{ 0 }; + size_t m_numVoxelsOutsideSurface{ 0 }; + std::vector m_data; +private: + + void MarkOutsideSurface(const size_t i0, + const size_t j0, + const size_t k0, + const size_t i1, + const size_t j1, + const size_t k1); + void FillOutsideSurface(); + + void FillInsideSurface(); + + std::vector m_surfaceVoxels; + std::vector m_interiorVoxels; +}; + +bool PlaneBoxOverlap(const VHACD::Vect3& normal, + const VHACD::Vect3& vert, + const VHACD::Vect3& 
maxbox) +{ + int32_t q; + VHACD::Vect3 vmin; + VHACD::Vect3 vmax; + double v; + for (q = 0; q < 3; q++) + { + v = vert[q]; + if (normal[q] > double(0.0)) + { + vmin[q] = -maxbox[q] - v; + vmax[q] = maxbox[q] - v; + } + else + { + vmin[q] = maxbox[q] - v; + vmax[q] = -maxbox[q] - v; + } + } + if (normal.Dot(vmin) > double(0.0)) + return false; + if (normal.Dot(vmax) >= double(0.0)) + return true; + return false; +} + +bool AxisTest(double a, double b, double fa, double fb, + double v0, double v1, double v2, double v3, + double boxHalfSize1, double boxHalfSize2) +{ + double p0 = a * v0 + b * v1; + double p1 = a * v2 + b * v3; + + double min = std::min(p0, p1); + double max = std::max(p0, p1); + + double rad = fa * boxHalfSize1 + fb * boxHalfSize2; + if (min > rad || max < -rad) + { + return false; + } + + return true; +} + +bool TriBoxOverlap(const VHACD::Vect3& boxCenter, + const VHACD::Vect3& boxHalfSize, + const VHACD::Vect3& triVer0, + const VHACD::Vect3& triVer1, + const VHACD::Vect3& triVer2) +{ + /* use separating axis theorem to test overlap between triangle and box */ + /* need to test for overlap in these directions: */ + /* 1) the {x,y,z}-directions (actually, since we use the AABB of the triangle */ + /* we do not even need to test these) */ + /* 2) normal of the triangle */ + /* 3) crossproduct(edge from tri, {x,y,z}-direction) */ + /* this gives 3x3=9 more tests */ + + VHACD::Vect3 v0 = triVer0 - boxCenter; + VHACD::Vect3 v1 = triVer1 - boxCenter; + VHACD::Vect3 v2 = triVer2 - boxCenter; + VHACD::Vect3 e0 = v1 - v0; + VHACD::Vect3 e1 = v2 - v1; + VHACD::Vect3 e2 = v0 - v2; + + /* This is the fastest branch on Sun */ + /* move everything so that the boxcenter is in (0,0,0) */ + + /* Bullet 3: */ + /* test the 9 tests first (this was faster) */ + double fex = fabs(e0[0]); + double fey = fabs(e0[1]); + double fez = fabs(e0[2]); + + /* + * These should use Get*() instead of subscript for consistency, but the function calls are long enough already + */ + if (!AxisTest( e0[2], -e0[1], fez, fey, v0[1], v0[2], v2[1], v2[2], boxHalfSize[1], boxHalfSize[2])) return 0; // X01 + if (!AxisTest(-e0[2], e0[0], fez, fex, v0[0], v0[2], v2[0], v2[2], boxHalfSize[0], boxHalfSize[2])) return 0; // Y02 + if (!AxisTest( e0[1], -e0[0], fey, fex, v1[0], v1[1], v2[0], v2[1], boxHalfSize[0], boxHalfSize[1])) return 0; // Z12 + + fex = fabs(e1[0]); + fey = fabs(e1[1]); + fez = fabs(e1[2]); + + if (!AxisTest( e1[2], -e1[1], fez, fey, v0[1], v0[2], v2[1], v2[2], boxHalfSize[1], boxHalfSize[2])) return 0; // X01 + if (!AxisTest(-e1[2], e1[0], fez, fex, v0[0], v0[2], v2[0], v2[2], boxHalfSize[0], boxHalfSize[2])) return 0; // Y02 + if (!AxisTest( e1[1], -e1[0], fey, fex, v0[0], v0[1], v1[0], v1[1], boxHalfSize[0], boxHalfSize[2])) return 0; // Z0 + + fex = fabs(e2[0]); + fey = fabs(e2[1]); + fez = fabs(e2[2]); + + if (!AxisTest( e2[2], -e2[1], fez, fey, v0[1], v0[2], v1[1], v1[2], boxHalfSize[1], boxHalfSize[2])) return 0; // X2 + if (!AxisTest(-e2[2], e2[0], fez, fex, v0[0], v0[2], v1[0], v1[2], boxHalfSize[0], boxHalfSize[2])) return 0; // Y1 + if (!AxisTest( e2[1], -e2[0], fey, fex, v1[0], v1[1], v2[0], v2[1], boxHalfSize[0], boxHalfSize[1])) return 0; // Z12 + + /* Bullet 1: */ + /* first test overlap in the {x,y,z}-directions */ + /* find min, max of the triangle each direction, and test for overlap in */ + /* that direction -- this is equivalent to testing a minimal AABB around */ + /* the triangle against the AABB */ + + /* test in 0-direction */ + double min = std::min({v0.GetX(), v1.GetX(), 
v2.GetX()}); + double max = std::max({v0.GetX(), v1.GetX(), v2.GetX()}); + if (min > boxHalfSize[0] || max < -boxHalfSize[0]) + return false; + + /* test in 1-direction */ + min = std::min({v0.GetY(), v1.GetY(), v2.GetY()}); + max = std::max({v0.GetY(), v1.GetY(), v2.GetY()}); + if (min > boxHalfSize[1] || max < -boxHalfSize[1]) + return false; + + /* test in getZ-direction */ + min = std::min({v0.GetZ(), v1.GetZ(), v2.GetZ()}); + max = std::max({v0.GetZ(), v1.GetZ(), v2.GetZ()}); + if (min > boxHalfSize[2] || max < -boxHalfSize[2]) + return false; + + /* Bullet 2: */ + /* test if the box intersects the plane of the triangle */ + /* compute plane equation of triangle: normal*x+d=0 */ + VHACD::Vect3 normal = e0.Cross(e1); + + if (!PlaneBoxOverlap(normal, v0, boxHalfSize)) + return false; + return true; /* box and triangle overlaps */ +} + +void Volume::Voxelize(const std::vector& points, + const std::vector& indices, + const size_t dimensions, + FillMode fillMode, + const AABBTree& aabbTree) +{ + double a = std::pow(dimensions, 0.33); + size_t dim = a * double(1.5); + dim = std::max(dim, size_t(32)); + + if (points.size() == 0) + { + return; + } + + m_bounds = BoundsAABB(points); + + VHACD::Vect3 d = m_bounds.GetSize(); + double r; + // Equal comparison is important here to avoid taking the last branch when d[0] == d[1] with d[2] being the smallest + // dimension. That would lead to dimensions in i and j to be a lot bigger than expected and make the amount of + // voxels in the volume totally unmanageable. + if (d[0] >= d[1] && d[0] >= d[2]) + { + r = d[0]; + m_dim[0] = uint32_t(dim); + m_dim[1] = uint32_t(2 + static_cast(dim * d[1] / d[0])); + m_dim[2] = uint32_t(2 + static_cast(dim * d[2] / d[0])); + } + else if (d[1] >= d[0] && d[1] >= d[2]) + { + r = d[1]; + m_dim[1] = uint32_t(dim); + m_dim[0] = uint32_t(2 + static_cast(dim * d[0] / d[1])); + m_dim[2] = uint32_t(2 + static_cast(dim * d[2] / d[1])); + } + else + { + r = d[2]; + m_dim[2] = uint32_t(dim); + m_dim[0] = uint32_t(2 + static_cast(dim * d[0] / d[2])); + m_dim[1] = uint32_t(2 + static_cast(dim * d[1] / d[2])); + } + + m_scale = r / (dim - 1); + double invScale = (dim - 1) / r; + + m_data = std::vector(m_dim[0] * m_dim[1] * m_dim[2], + VoxelValue::PRIMITIVE_UNDEFINED); + m_numVoxelsOnSurface = 0; + m_numVoxelsInsideSurface = 0; + m_numVoxelsOutsideSurface = 0; + + VHACD::Vect3 p[3]; + VHACD::Vect3 boxcenter; + VHACD::Vect3 pt; + const VHACD::Vect3 boxhalfsize(double(0.5)); + for (size_t t = 0; t < indices.size(); ++t) + { + size_t i0, j0, k0; + size_t i1, j1, k1; + VHACD::Vector3 tri = indices[t]; + for (int32_t c = 0; c < 3; ++c) + { + pt = points[tri[c]]; + + p[c] = (pt - m_bounds.GetMin()) * invScale; + + size_t i = static_cast(p[c][0] + double(0.5)); + size_t j = static_cast(p[c][1] + double(0.5)); + size_t k = static_cast(p[c][2] + double(0.5)); + + assert(i < m_dim[0] && j < m_dim[1] && k < m_dim[2]); + + if (c == 0) + { + i0 = i1 = i; + j0 = j1 = j; + k0 = k1 = k; + } + else + { + i0 = std::min(i0, i); + j0 = std::min(j0, j); + k0 = std::min(k0, k); + + i1 = std::max(i1, i); + j1 = std::max(j1, j); + k1 = std::max(k1, k); + } + } + if (i0 > 0) + --i0; + if (j0 > 0) + --j0; + if (k0 > 0) + --k0; + if (i1 < m_dim[0]) + ++i1; + if (j1 < m_dim[1]) + ++j1; + if (k1 < m_dim[2]) + ++k1; + for (size_t i_id = i0; i_id < i1; ++i_id) + { + boxcenter[0] = uint32_t(i_id); + for (size_t j_id = j0; j_id < j1; ++j_id) + { + boxcenter[1] = uint32_t(j_id); + for (size_t k_id = k0; k_id < k1; ++k_id) + { + boxcenter[2] = uint32_t(k_id); + 
bool res = TriBoxOverlap(boxcenter, + boxhalfsize, + p[0], + p[1], + p[2]); + VoxelValue& value = GetVoxel(i_id, + j_id, + k_id); + if ( res + && value == VoxelValue::PRIMITIVE_UNDEFINED) + { + value = VoxelValue::PRIMITIVE_ON_SURFACE; + ++m_numVoxelsOnSurface; + m_surfaceVoxels.emplace_back(uint32_t(i_id), + uint32_t(j_id), + uint32_t(k_id)); + } + } + } + } + } + + if (fillMode == FillMode::SURFACE_ONLY) + { + const size_t i0_local = m_dim[0]; + const size_t j0_local = m_dim[1]; + const size_t k0_local = m_dim[2]; + for (size_t i_id = 0; i_id < i0_local; ++i_id) + { + for (size_t j_id = 0; j_id < j0_local; ++j_id) + { + for (size_t k_id = 0; k_id < k0_local; ++k_id) + { + const VoxelValue& voxel = GetVoxel(i_id, + j_id, + k_id); + if (voxel != VoxelValue::PRIMITIVE_ON_SURFACE) + { + SetVoxel(i_id, + j_id, + k_id, + VoxelValue::PRIMITIVE_OUTSIDE_SURFACE); + } + } + } + } + } + else if (fillMode == FillMode::FLOOD_FILL) + { + /* + * Marking the outside edges of the voxel cube to be outside surfaces to walk + */ + MarkOutsideSurface(0, 0, 0, m_dim[0], m_dim[1], 1); + MarkOutsideSurface(0, 0, m_dim[2] - 1, m_dim[0], m_dim[1], m_dim[2]); + MarkOutsideSurface(0, 0, 0, m_dim[0], 1, m_dim[2]); + MarkOutsideSurface(0, m_dim[1] - 1, 0, m_dim[0], m_dim[1], m_dim[2]); + MarkOutsideSurface(0, 0, 0, 1, m_dim[1], m_dim[2]); + MarkOutsideSurface(m_dim[0] - 1, 0, 0, m_dim[0], m_dim[1], m_dim[2]); + FillOutsideSurface(); + FillInsideSurface(); + } + else if (fillMode == FillMode::RAYCAST_FILL) + { + RaycastFill(aabbTree); + } +} + +void Volume::RaycastFill(const AABBTree& aabbTree) +{ + const uint32_t i0 = m_dim[0]; + const uint32_t j0 = m_dim[1]; + const uint32_t k0 = m_dim[2]; + + size_t maxSize = i0 * j0 * k0; + + std::vector temp; + temp.reserve(maxSize); + uint32_t count{ 0 }; + m_numVoxelsInsideSurface = 0; + for (uint32_t i = 0; i < i0; ++i) + { + for (uint32_t j = 0; j < j0; ++j) + { + for (uint32_t k = 0; k < k0; ++k) + { + VoxelValue& voxel = GetVoxel(i, j, k); + if (voxel != VoxelValue::PRIMITIVE_ON_SURFACE) + { + VHACD::Vect3 start = VHACD::Vect3(i, j, k) * m_scale + m_bounds.GetMin(); + + uint32_t insideCount = 0; + uint32_t outsideCount = 0; + + VHACD::Vect3 directions[6] = { + VHACD::Vect3( 1, 0, 0), + VHACD::Vect3(-1, 0, 0), // this was 1, 0, 0 in the original code, but looks wrong + VHACD::Vect3( 0, 1, 0), + VHACD::Vect3( 0, -1, 0), + VHACD::Vect3( 0, 0, 1), + VHACD::Vect3( 0, 0, -1) + }; + + for (uint32_t r = 0; r < 6; r++) + { + aabbTree.TraceRay(start, + directions[r], + insideCount, + outsideCount); + // Early out if we hit the outside of the mesh + if (outsideCount) + { + break; + } + // Early out if we accumulated 3 inside hits + if (insideCount >= 3) + { + break; + } + } + + if (outsideCount == 0 && insideCount >= 3) + { + voxel = VoxelValue::PRIMITIVE_INSIDE_SURFACE; + temp.emplace_back(i, j, k); + count++; + m_numVoxelsInsideSurface++; + } + else + { + voxel = VoxelValue::PRIMITIVE_OUTSIDE_SURFACE; + } + } + } + } + } + + if (count) + { + m_interiorVoxels = std::move(temp); + } +} + +void Volume::SetVoxel(const size_t i, + const size_t j, + const size_t k, + VoxelValue value) +{ + assert(i < m_dim[0]); + assert(j < m_dim[1]); + assert(k < m_dim[2]); + + m_data[k + j * m_dim[2] + i * m_dim[1] * m_dim[2]] = value; +} + +VoxelValue& Volume::GetVoxel(const size_t i, + const size_t j, + const size_t k) +{ + assert(i < m_dim[0]); + assert(j < m_dim[1]); + assert(k < m_dim[2]); + return m_data[k + j * m_dim[2] + i * m_dim[1] * m_dim[2]]; +} + +const VoxelValue& Volume::GetVoxel(const 
size_t i, + const size_t j, + const size_t k) const +{ + assert(i < m_dim[0]); + assert(j < m_dim[1]); + assert(k < m_dim[2]); + return m_data[k + j * m_dim[2] + i * m_dim[1] * m_dim[2]]; +} + +const std::vector& Volume::GetSurfaceVoxels() const +{ + return m_surfaceVoxels; +} + +const std::vector& Volume::GetInteriorVoxels() const +{ + return m_interiorVoxels; +} + +double Volume::GetScale() const +{ + return m_scale; +} + +const VHACD::BoundsAABB& Volume::GetBounds() const +{ + return m_bounds; +} + +const VHACD::Vector3& Volume::GetDimensions() const +{ + return m_dim; +} + +void Volume::MarkOutsideSurface(const size_t i0, + const size_t j0, + const size_t k0, + const size_t i1, + const size_t j1, + const size_t k1) +{ + for (size_t i = i0; i < i1; ++i) + { + for (size_t j = j0; j < j1; ++j) + { + for (size_t k = k0; k < k1; ++k) + { + VoxelValue& v = GetVoxel(i, j, k); + if (v == VoxelValue::PRIMITIVE_UNDEFINED) + { + v = VoxelValue::PRIMITIVE_OUTSIDE_SURFACE_TOWALK; + } + } + } + } +} + +inline void WalkForward(int64_t start, + int64_t end, + VoxelValue* ptr, + int64_t stride, + int64_t maxDistance) +{ + for (int64_t i = start, count = 0; + count < maxDistance && i < end && *ptr == VoxelValue::PRIMITIVE_UNDEFINED; + ++i, ptr += stride, ++count) + { + *ptr = VoxelValue::PRIMITIVE_OUTSIDE_SURFACE_TOWALK; + } +} + +inline void WalkBackward(int64_t start, + int64_t end, + VoxelValue* ptr, + int64_t stride, + int64_t maxDistance) +{ + for (int64_t i = start, count = 0; + count < maxDistance && i >= end && *ptr == VoxelValue::PRIMITIVE_UNDEFINED; + --i, ptr -= stride, ++count) + { + *ptr = VoxelValue::PRIMITIVE_OUTSIDE_SURFACE_TOWALK; + } +} + +void Volume::FillOutsideSurface() +{ + size_t voxelsWalked = 0; + const int64_t i0 = m_dim[0]; + const int64_t j0 = m_dim[1]; + const int64_t k0 = m_dim[2]; + + // Avoid striding too far in each direction to stay in L1 cache as much as possible. + // The cache size required for the walk is roughly (4 * walkDistance * 64) since + // the k direction doesn't count as it's walking byte per byte directly in a cache lines. + // ~16k is required for a walk distance of 64 in each directions. + const size_t walkDistance = 64; + + // using the stride directly instead of calling GetVoxel for each iterations saves + // a lot of multiplications and pipeline stalls due to data dependencies on imul. + const size_t istride = &GetVoxel(1, 0, 0) - &GetVoxel(0, 0, 0); + const size_t jstride = &GetVoxel(0, 1, 0) - &GetVoxel(0, 0, 0); + const size_t kstride = &GetVoxel(0, 0, 1) - &GetVoxel(0, 0, 0); + + // It might seem counter intuitive to go over the whole voxel range multiple times + // but since we do the run in memory order, it leaves us with far fewer cache misses + // than a BFS algorithm and it has the additional benefit of not requiring us to + // store and manipulate a fifo for recursion that might become huge when the number + // of voxels is large. + // This will outperform the BFS algorithm by several orders of magnitude in practice. + do + { + voxelsWalked = 0; + for (int64_t i = 0; i < i0; ++i) + { + for (int64_t j = 0; j < j0; ++j) + { + for (int64_t k = 0; k < k0; ++k) + { + VoxelValue& voxel = GetVoxel(i, j, k); + if (voxel == VoxelValue::PRIMITIVE_OUTSIDE_SURFACE_TOWALK) + { + voxelsWalked++; + voxel = VoxelValue::PRIMITIVE_OUTSIDE_SURFACE; + + // walk in each direction to mark other voxel that should be walked. + // this will generate a 3d pattern that will help the overall + // algorithm converge faster while remaining cache friendly. 
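+                        // Each of the six calls below marks at most walkDistance consecutive
+                        // PRIMITIVE_UNDEFINED voxels along +/-k, +/-j and +/-i as
+                        // PRIMITIVE_OUTSIDE_SURFACE_TOWALK, stopping early at the volume bounds
+                        // or at the first voxel that already holds another value.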
+ WalkForward(k + 1, k0, &voxel + kstride, kstride, walkDistance); + WalkBackward(k - 1, 0, &voxel - kstride, kstride, walkDistance); + + WalkForward(j + 1, j0, &voxel + jstride, jstride, walkDistance); + WalkBackward(j - 1, 0, &voxel - jstride, jstride, walkDistance); + + WalkForward(i + 1, i0, &voxel + istride, istride, walkDistance); + WalkBackward(i - 1, 0, &voxel - istride, istride, walkDistance); + } + } + } + } + + m_numVoxelsOutsideSurface += voxelsWalked; + } while (voxelsWalked != 0); +} + +void Volume::FillInsideSurface() +{ + const uint32_t i0 = uint32_t(m_dim[0]); + const uint32_t j0 = uint32_t(m_dim[1]); + const uint32_t k0 = uint32_t(m_dim[2]); + + size_t maxSize = i0 * j0 * k0; + + std::vector temp; + temp.reserve(maxSize); + uint32_t count{ 0 }; + + for (uint32_t i = 0; i < i0; ++i) + { + for (uint32_t j = 0; j < j0; ++j) + { + for (uint32_t k = 0; k < k0; ++k) + { + VoxelValue& v = GetVoxel(i, j, k); + if (v == VoxelValue::PRIMITIVE_UNDEFINED) + { + v = VoxelValue::PRIMITIVE_INSIDE_SURFACE; + temp.emplace_back(i, j, k); + count++; + ++m_numVoxelsInsideSurface; + } + } + } + } + + if ( count ) + { + m_interiorVoxels = std::move(temp); + } +} + +//****************************************************************************************** +// ShrinkWrap helper class +//****************************************************************************************** +// This is a code snippet which 'shrinkwraps' a convex hull +// to a source mesh. +// +// It is a somewhat complicated algorithm. It works as follows: +// +// * Step #1 : Compute the mean unit normal vector for each vertex in the convex hull +// * Step #2 : For each vertex in the conex hull we project is slightly outwards along the mean normal vector +// * Step #3 : We then raycast from this slightly extruded point back into the opposite direction of the mean normal vector +// resulting in a raycast from slightly beyond the vertex in the hull into the source mesh we are trying +// to 'shrink wrap' against +// * Step #4 : If the raycast fails we leave the original vertex alone +// * Step #5 : If the raycast hits a backface we leave the original vertex alone +// * Step #6 : If the raycast hits too far away (no more than a certain threshold distance) we live it alone +// * Step #7 : If the point we hit on the source mesh is not still within the convex hull, we reject it. +// * Step #8 : If all of the previous conditions are met, then we take the raycast hit location as the 'new position' +// * Step #9 : Once all points have been projected, if possible, we need to recompute the convex hull again based on these shrinkwrapped points +// * Step #10 : In theory that should work.. let's see... 
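+//
+// A minimal illustrative sketch of the per-vertex test described in steps #2-#8 above, assuming
+// the caller has already computed a mean unit normal for the hull vertex (step #1); the name
+// 'ProjectVertexSketch' and the 'maxProjection' threshold are introduced here purely for
+// illustration and are not used elsewhere in this file. The backface test (step #5) and the
+// still-inside-the-hull test (step #7) are omitted for brevity. Note that the ShrinkWrap()
+// implementation further below takes a shortcut: rather than extruding and raycasting, it snaps
+// each hull vertex to the closest point on the source mesh within 'distanceThreshold' (when one
+// exists) and then recomputes the hull (step #9).
+static inline VHACD::Vect3 ProjectVertexSketch(const AABBTree& sourceMesh,
+                                               const VHACD::Vect3& vertex,
+                                               const VHACD::Vect3& meanNormal,
+                                               double maxProjection)
+{
+    // Step #2: push the vertex slightly outwards along its mean normal
+    VHACD::Vect3 start = vertex + meanNormal * maxProjection;
+    // Step #3: raycast from the extruded point back through the vertex into the source mesh
+    VHACD::Vect3 to = vertex - meanNormal * maxProjection;
+    double t;
+    double faceSign;
+    VHACD::Vect3 hit;
+    if (!sourceMesh.TraceRay(start, to, t, faceSign, hit))
+    {
+        // Step #4: the raycast missed the source mesh, keep the original vertex
+        return vertex;
+    }
+    if ((hit - vertex).GetNorm() > maxProjection)
+    {
+        // Step #6: the hit is too far away from the original vertex, leave it alone
+        return vertex;
+    }
+    // Step #8: accept the raycast hit location as the new vertex position
+    return hit;
+}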
+ +//*********************************************************************************************** +// QuickHull implementation +//*********************************************************************************************** + +////////////////////////////////////////////////////////////////////////// +// Quickhull base class holding the hull during construction +////////////////////////////////////////////////////////////////////////// +class QuickHull +{ +public: + uint32_t ComputeConvexHull(const std::vector& vertices, + uint32_t maxHullVertices); + + const std::vector& GetVertices() const; + const std::vector& GetIndices() const; + +private: + std::vector m_vertices; + std::vector m_indices; +}; + +uint32_t QuickHull::ComputeConvexHull(const std::vector& vertices, + uint32_t maxHullVertices) +{ + m_indices.clear(); + + VHACD::ConvexHull ch(vertices, + double(0.0001), + maxHullVertices); + + auto& vlist = ch.GetVertexPool(); + if ( !vlist.empty() ) + { + size_t vcount = vlist.size(); + m_vertices.resize(vcount); + std::copy(vlist.begin(), + vlist.end(), + m_vertices.begin()); + } + + for (std::list::const_iterator node = ch.GetList().begin(); node != ch.GetList().end(); ++node) + { + const VHACD::ConvexHullFace& face = *node; + m_indices.emplace_back(face.m_index[0], + face.m_index[1], + face.m_index[2]); + } + + return uint32_t(m_indices.size()); +} + +const std::vector& QuickHull::GetVertices() const +{ + return m_vertices; +} + +const std::vector& QuickHull::GetIndices() const +{ + return m_indices; +} + +//****************************************************************************************** +// Implementation of the ShrinkWrap function +//****************************************************************************************** + +void ShrinkWrap(SimpleMesh& sourceConvexHull, + const AABBTree& aabbTree, + uint32_t maxHullVertexCount, + double distanceThreshold, + bool doShrinkWrap) +{ + std::vector verts; // New verts for the new convex hull + verts.reserve(sourceConvexHull.m_vertices.size()); + // Examine each vertex and see if it is within the voxel distance. + // If it is, then replace the point with the shrinkwrapped / projected point + for (uint32_t j = 0; j < sourceConvexHull.m_vertices.size(); j++) + { + VHACD::Vertex& p = sourceConvexHull.m_vertices[j]; + if (doShrinkWrap) + { + VHACD::Vect3 closest; + if (aabbTree.GetClosestPointWithinDistance(p, distanceThreshold, closest)) + { + p = closest; + } + } + verts.emplace_back(p); + } + // Final step is to recompute the convex hull + VHACD::QuickHull qh; + uint32_t tcount = qh.ComputeConvexHull(verts, + maxHullVertexCount); + if (tcount) + { + sourceConvexHull.m_vertices = qh.GetVertices(); + sourceConvexHull.m_indices = qh.GetIndices(); + } +} + +//******************************************************************************************************************** + +#if !VHACD_DISABLE_THREADING + +//******************************************************************************************************************** +// Definition of the ThreadPool +//******************************************************************************************************************** + +class ThreadPool { + public: + ThreadPool(); + ThreadPool(int worker); + ~ThreadPool(); + template + auto enqueue(F&& f, Args&& ... args) +#ifndef __cpp_lib_is_invocable + -> std::future< typename std::result_of< F( Args... 
) >::type>; +#else + -> std::future< typename std::invoke_result_t>; +#endif + private: + std::vector workers; + std::deque> tasks; + std::mutex task_mutex; + std::condition_variable cv; + bool closed; +}; + +ThreadPool::ThreadPool() + : ThreadPool(1) +{ +} + +ThreadPool::ThreadPool(int worker) + : closed(false) +{ + workers.reserve(worker); + for(int i=0; i lock(this->task_mutex); + while(true) + { + while (this->tasks.empty()) + { + if (this->closed) + { + return; + } + this->cv.wait(lock); + } + auto task = this->tasks.front(); + this->tasks.pop_front(); + lock.unlock(); + task(); + lock.lock(); + } + } + ); + } +} + +template +auto ThreadPool::enqueue(F&& f, Args&& ... args) +#ifndef __cpp_lib_is_invocable + -> std::future< typename std::result_of< F( Args... ) >::type> +#else + -> std::future< typename std::invoke_result_t> +#endif +{ + +#ifndef __cpp_lib_is_invocable + using return_type = typename std::result_of< F( Args... ) >::type; +#else + using return_type = typename std::invoke_result_t< F, Args... >; +#endif + auto task = std::make_shared > ( + std::bind(std::forward(f), std::forward(args)...) + ); + auto result = task->get_future(); + + { + std::unique_lock lock(task_mutex); + if (!closed) + { + tasks.emplace_back([task] + { + (*task)(); + }); + cv.notify_one(); + } + } + + return result; +} + +ThreadPool::~ThreadPool() { + { + std::unique_lock lock(task_mutex); + closed = true; + } + cv.notify_all(); + for (auto && worker : workers) + { + worker.join(); + } +} +#endif + +enum class Stages +{ + COMPUTE_BOUNDS_OF_INPUT_MESH, + REINDEXING_INPUT_MESH, + CREATE_RAYCAST_MESH, + VOXELIZING_INPUT_MESH, + BUILD_INITIAL_CONVEX_HULL, + PERFORMING_DECOMPOSITION, + INITIALIZING_CONVEX_HULLS_FOR_MERGING, + COMPUTING_COST_MATRIX, + MERGING_CONVEX_HULLS, + FINALIZING_RESULTS, + NUM_STAGES +}; + +class VHACDCallbacks +{ +public: + virtual void ProgressUpdate(Stages stage, + double stageProgress, + const char *operation) = 0; + virtual bool IsCanceled() const = 0; + + virtual ~VHACDCallbacks() = default; +}; + +enum class SplitAxis +{ + X_AXIS_NEGATIVE, + X_AXIS_POSITIVE, + Y_AXIS_NEGATIVE, + Y_AXIS_POSITIVE, + Z_AXIS_NEGATIVE, + Z_AXIS_POSITIVE, +}; + +// This class represents a collection of voxels, the convex hull +// which surrounds them, and a triangle mesh representation of those voxels +class VoxelHull +{ +public: + + // This method constructs a new VoxelHull based on a plane split of the parent + // convex hull + VoxelHull(const VoxelHull& parent, + SplitAxis axis, + uint32_t splitLoc); + + // Here we construct the initial convex hull around the + // entire voxel set + VoxelHull(Volume& voxels, + const IVHACD::Parameters ¶ms, + VHACDCallbacks *callbacks); + + ~VoxelHull() = default; + + // Helper method to refresh the min/max voxel bounding region + void MinMaxVoxelRegion(const Voxel &v); + + void BuildRaycastMesh(); + + // We now compute the convex hull relative to a triangle mesh generated + // from the voxels + void ComputeConvexHull(); + + // Returns true if this convex hull should be considered done + bool IsComplete(); + + + // Convert a voxel position into it's correct double precision location + VHACD::Vect3 GetPoint(const int32_t x, + const int32_t y, + const int32_t z, + const double scale, + const VHACD::Vect3& bmin) const; + + // Sees if we have already got an index for this voxel position. + // If the voxel position has already been indexed, we just return + // that index value. 
+ // If not, then we convert it into the floating point position and + // add it to the index map + uint32_t GetVertexIndex(const VHACD::Vector3& p); + + // This method will convert the voxels into an actual indexed triangle mesh of boxes + // This serves two purposes. + // The primary purpose is so that when we compute a convex hull it considered all of the points + // for each voxel, not just the center point. If you don't do this, then the hulls don't fit the + // mesh accurately enough. + // The second reason we convert it into a triangle mesh is so that we can do raycasting against it + // to search for the best splitting plane fairly quickly. That algorithm will be discussed in the + // method which computes the best splitting plane. + void BuildVoxelMesh(); + + // Convert a single voxel position into an actual 3d box mesh comprised + // of 12 triangles + void AddVoxelBox(const Voxel &v); + + // Add the triangle represented by these 3 indices into the 'box' set of vertices + // to the output mesh + void AddTri(const std::array, 8>& box, + uint32_t i1, + uint32_t i2, + uint32_t i3); + + // Here we convert from voxel space to a 3d position, index it, and add + // the triangle positions and indices for the output mesh + void AddTriangle(const VHACD::Vector3& p1, + const VHACD::Vector3& p2, + const VHACD::Vector3& p3); + + // When computing the split plane, we start by simply + // taking the midpoint of the longest side. However, + // we can also search the surface and look for the greatest + // spot of concavity and use that as the split location. + // This will make the convex decomposition more efficient + // as it will tend to cut across the greatest point of + // concavity on the surface. + SplitAxis ComputeSplitPlane(uint32_t& location); + + VHACD::Vect3 GetPosition(const VHACD::Vector3& ip) const; + + double Raycast(const VHACD::Vector3& p1, + const VHACD::Vector3& p2) const; + + bool FindConcavity(uint32_t idx, + uint32_t& splitLoc); + + // Finding the greatest area of concavity.. + bool FindConcavityX(uint32_t& splitLoc); + + // Finding the greatest area of concavity.. + bool FindConcavityY(uint32_t& splitLoc); + + // Finding the greatest area of concavity.. + bool FindConcavityZ(uint32_t& splitLoc); + + // This operation is performed in a background thread. + // It splits the voxels by a plane + void PerformPlaneSplit(); + + // Used only for debugging. Saves the voxelized mesh to disk + // Optionally saves the original source mesh as well for comparison + void SaveVoxelMesh(const SimpleMesh& inputMesh, + bool saveVoxelMesh, + bool saveSourceMesh); + + void SaveOBJ(const char* fname, + const VoxelHull* h); + + void SaveOBJ(const char* fname); + +private: + void WriteOBJ(FILE* fph, + const std::vector& vertices, + const std::vector& indices, + uint32_t baseIndex); +public: + + SplitAxis m_axis{ SplitAxis::X_AXIS_NEGATIVE }; + Volume* m_voxels{ nullptr }; // The voxelized data set + double m_voxelScale{ 0 }; // Size of a single voxel + double m_voxelScaleHalf{ 0 }; // 1/2 of the size of a single voxel + VHACD::BoundsAABB m_voxelBounds; + VHACD::Vect3 m_voxelAdjust; // Minimum coordinates of the voxel space, with adjustment + uint32_t m_depth{ 0 }; // How deep in the recursion of the binary tree this hull is + uint32_t m_index{ 0 }; // Each convex hull is given a unique id to distinguish it from the others + double m_volumeError{ 0 }; // The percentage error from the convex hull volume vs. 
the voxel volume + double m_voxelVolume{ 0 }; // The volume of the voxels + double m_hullVolume{ 0 }; // The volume of the enclosing convex hull + + std::unique_ptr m_convexHull{ nullptr }; // The convex hull which encloses this set of voxels. + std::vector m_surfaceVoxels; // The voxels which are on the surface of the source mesh. + std::vector m_newSurfaceVoxels; // Voxels which are on the surface as a result of a plane split + std::vector m_interiorVoxels; // Voxels which are part of the interior of the hull + + std::unique_ptr m_hullA{ nullptr }; // hull resulting from one side of the plane split + std::unique_ptr m_hullB{ nullptr }; // hull resulting from the other side of the plane split + + // Defines the coordinates this convex hull comprises within the voxel volume + // of the entire source + VHACD::Vector3 m_1{ 0 }; + VHACD::Vector3 m_2{ 0 }; + AABBTree m_AABBTree; + std::unordered_map m_voxelIndexMap; // Maps from a voxel coordinate space into a vertex index space + std::vector m_vertices; + std::vector m_indices; + static uint32_t m_voxelHullCount; + IVHACD::Parameters m_params; + VHACDCallbacks* m_callbacks{ nullptr }; +}; + +uint32_t VoxelHull::m_voxelHullCount = 0; + +VoxelHull::VoxelHull(const VoxelHull& parent, + SplitAxis axis, + uint32_t splitLoc) + : m_axis(axis) + , m_voxels(parent.m_voxels) + , m_voxelScale(m_voxels->GetScale()) + , m_voxelScaleHalf(m_voxelScale * double(0.5)) + , m_voxelBounds(m_voxels->GetBounds()) + , m_voxelAdjust(m_voxelBounds.GetMin() - m_voxelScaleHalf) + , m_depth(parent.m_depth + 1) + , m_index(++m_voxelHullCount) + , m_1(parent.m_1) + , m_2(parent.m_2) + , m_params(parent.m_params) +{ + // Default copy the voxel region from the parent, but values will + // be adjusted next based on the split axis and location + switch ( m_axis ) + { + case SplitAxis::X_AXIS_NEGATIVE: + m_2.GetX() = splitLoc; + break; + case SplitAxis::X_AXIS_POSITIVE: + m_1.GetX() = splitLoc + 1; + break; + case SplitAxis::Y_AXIS_NEGATIVE: + m_2.GetY() = splitLoc; + break; + case SplitAxis::Y_AXIS_POSITIVE: + m_1.GetY() = splitLoc + 1; + break; + case SplitAxis::Z_AXIS_NEGATIVE: + m_2.GetZ() = splitLoc; + break; + case SplitAxis::Z_AXIS_POSITIVE: + m_1.GetZ() = splitLoc + 1; + break; + } + + // First, we copy all of the interior voxels from our parent + // which intersect our region + for (auto& i : parent.m_interiorVoxels) + { + VHACD::Vector3 v = i.GetVoxel(); + if (v.CWiseAllGE(m_1) && v.CWiseAllLE(m_2)) + { + bool newSurface = false; + switch ( m_axis ) + { + case SplitAxis::X_AXIS_NEGATIVE: + if ( v.GetX() == splitLoc ) + { + newSurface = true; + } + break; + case SplitAxis::X_AXIS_POSITIVE: + if ( v.GetX() == m_1.GetX() ) + { + newSurface = true; + } + break; + case SplitAxis::Y_AXIS_NEGATIVE: + if ( v.GetY() == splitLoc ) + { + newSurface = true; + } + break; + case SplitAxis::Y_AXIS_POSITIVE: + if ( v.GetY() == m_1.GetY() ) + { + newSurface = true; + } + break; + case SplitAxis::Z_AXIS_NEGATIVE: + if ( v.GetZ() == splitLoc ) + { + newSurface = true; + } + break; + case SplitAxis::Z_AXIS_POSITIVE: + if ( v.GetZ() == m_1.GetZ() ) + { + newSurface = true; + } + break; + } + // If his interior voxels lie directly on the split plane then + // these become new surface voxels for our patch + if ( newSurface ) + { + m_newSurfaceVoxels.push_back(i); + } + else + { + m_interiorVoxels.push_back(i); + } + } + } + // Next we copy all of the surface voxels which intersect our region + for (auto& i : parent.m_surfaceVoxels) + { + VHACD::Vector3 v = i.GetVoxel(); + if 
(v.CWiseAllGE(m_1) && v.CWiseAllLE(m_2)) + { + m_surfaceVoxels.push_back(i); + } + } + // Our parent's new surface voxels become our new surface voxels so long as they intersect our region + for (auto& i : parent.m_newSurfaceVoxels) + { + VHACD::Vector3 v = i.GetVoxel(); + if (v.CWiseAllGE(m_1) && v.CWiseAllLE(m_2)) + { + m_newSurfaceVoxels.push_back(i); + } + } + + // Recompute the min-max bounding box which would be different after the split occurs + m_1 = VHACD::Vector3(0x7FFFFFFF); + m_2 = VHACD::Vector3(0); + for (auto& i : m_surfaceVoxels) + { + MinMaxVoxelRegion(i); + } + for (auto& i : m_newSurfaceVoxels) + { + MinMaxVoxelRegion(i); + } + for (auto& i : m_interiorVoxels) + { + MinMaxVoxelRegion(i); + } + + BuildVoxelMesh(); + BuildRaycastMesh(); // build a raycast mesh of the voxel mesh + ComputeConvexHull(); +} + +VoxelHull::VoxelHull(Volume& voxels, + const IVHACD::Parameters& params, + VHACDCallbacks* callbacks) + : m_voxels(&voxels) + , m_voxelScale(m_voxels->GetScale()) + , m_voxelScaleHalf(m_voxelScale * double(0.5)) + , m_voxelBounds(m_voxels->GetBounds()) + , m_voxelAdjust(m_voxelBounds.GetMin() - m_voxelScaleHalf) + , m_index(++m_voxelHullCount) + // Here we get a copy of all voxels which lie on the surface mesh + , m_surfaceVoxels(m_voxels->GetSurfaceVoxels()) + // Now we get a copy of all voxels which are considered part of the 'interior' of the source mesh + , m_interiorVoxels(m_voxels->GetInteriorVoxels()) + , m_2(m_voxels->GetDimensions() - 1) + , m_params(params) + , m_callbacks(callbacks) +{ + BuildVoxelMesh(); + BuildRaycastMesh(); // build a raycast mesh of the voxel mesh + ComputeConvexHull(); +} + +void VoxelHull::MinMaxVoxelRegion(const Voxel& v) +{ + VHACD::Vector3 x = v.GetVoxel(); + m_1 = m_1.CWiseMin(x); + m_2 = m_2.CWiseMax(x); +} + +void VoxelHull::BuildRaycastMesh() +{ + // Create a raycast mesh representation of the voxelized surface mesh + if ( !m_indices.empty() ) + { + m_AABBTree = AABBTree(m_vertices, + m_indices); + } +} + +void VoxelHull::ComputeConvexHull() +{ + if ( !m_vertices.empty() ) + { + // we compute the convex hull as follows... 
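+        // (1) run QuickHull over the voxel-box vertices produced by BuildVoxelMesh(),
+        // (2) store the resulting points/triangles and compute their centroid and volume,
+        // (3) then, below, approximate the voxel volume as voxelCount * m_voxelScale^3 and
+        //     report the mismatch as a percentage:
+        //         m_volumeError = |m_hullVolume - m_voxelVolume| * 100 / m_voxelVolume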
+ VHACD::QuickHull qh; + uint32_t tcount = qh.ComputeConvexHull(m_vertices, + uint32_t(m_vertices.size())); + if ( tcount ) + { + m_convexHull = std::unique_ptr(new IVHACD::ConvexHull); + + m_convexHull->m_points = qh.GetVertices(); + m_convexHull->m_triangles = qh.GetIndices(); + + VHACD::ComputeCentroid(m_convexHull->m_points, + m_convexHull->m_triangles, + m_convexHull->m_center); + m_convexHull->m_volume = VHACD::ComputeMeshVolume(m_convexHull->m_points, + m_convexHull->m_triangles); + } + } + if ( m_convexHull ) + { + m_hullVolume = m_convexHull->m_volume; + } + // This is the volume of a single voxel + double singleVoxelVolume = m_voxelScale * m_voxelScale * m_voxelScale; + size_t voxelCount = m_interiorVoxels.size() + m_newSurfaceVoxels.size() + m_surfaceVoxels.size(); + m_voxelVolume = singleVoxelVolume * double(voxelCount); + double diff = fabs(m_hullVolume - m_voxelVolume); + m_volumeError = (diff * 100) / m_voxelVolume; +} + +bool VoxelHull::IsComplete() +{ + bool ret = false; + if ( m_convexHull == nullptr ) + { + ret = true; + } + else if ( m_volumeError < m_params.m_minimumVolumePercentErrorAllowed ) + { + ret = true; + } + else if ( m_depth > m_params.m_maxRecursionDepth ) + { + ret = true; + } + else + { + // We compute the voxel width on all 3 axes and see if they are below the min threshold size + VHACD::Vector3 d = m_2 - m_1; + if ( d.GetX() <= m_params.m_minEdgeLength && + d.GetY() <= m_params.m_minEdgeLength && + d.GetZ() <= m_params.m_minEdgeLength ) + { + ret = true; + } + } + return ret; +} + +VHACD::Vect3 VoxelHull::GetPoint(const int32_t x, + const int32_t y, + const int32_t z, + const double scale, + const VHACD::Vect3& bmin) const +{ + return VHACD::Vect3(x * scale + bmin.GetX(), + y * scale + bmin.GetY(), + z * scale + bmin.GetZ()); +} + +uint32_t VoxelHull::GetVertexIndex(const VHACD::Vector3& p) +{ + uint32_t ret = 0; + uint32_t address = (p.GetX() << 20) | (p.GetY() << 10) | p.GetZ(); + auto found = m_voxelIndexMap.find(address); + if ( found != m_voxelIndexMap.end() ) + { + ret = found->second; + } + else + { + VHACD::Vect3 vertex = GetPoint(p.GetX(), + p.GetY(), + p.GetZ(), + m_voxelScale, + m_voxelAdjust); + ret = uint32_t(m_voxelIndexMap.size()); + m_voxelIndexMap[address] = ret; + m_vertices.emplace_back(vertex); + } + return ret; +} + +void VoxelHull::BuildVoxelMesh() +{ + // When we build the triangle mesh we do *not* need the interior voxels, only the ones + // which lie upon the logical surface of the mesh. + // Each time we perform a plane split, voxels which are along the splitting plane become + // 'new surface voxels'. 
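+    // Both voxel lists below are expanded into axis-aligned boxes (12 triangles each) by
+    // AddVoxelBox(); shared box corners are de-duplicated by GetVertexIndex(), which packs
+    // the integer voxel coordinate into a single 30-bit key (10 bits per axis) for the
+    // m_voxelIndexMap lookup.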
+ + for (auto& i : m_surfaceVoxels) + { + AddVoxelBox(i); + } + for (auto& i : m_newSurfaceVoxels) + { + AddVoxelBox(i); + } +} + +void VoxelHull::AddVoxelBox(const Voxel &v) +{ + // The voxel position of the upper left corner of the box + VHACD::Vector3 bmin(v.GetX(), + v.GetY(), + v.GetZ()); + // The voxel position of the lower right corner of the box + VHACD::Vector3 bmax(bmin.GetX() + 1, + bmin.GetY() + 1, + bmin.GetZ() + 1); + + // Build the set of 8 voxel positions representing + // the coordinates of the box + std::array, 8> box{{ + { bmin.GetX(), bmin.GetY(), bmin.GetZ() }, + { bmax.GetX(), bmin.GetY(), bmin.GetZ() }, + { bmax.GetX(), bmax.GetY(), bmin.GetZ() }, + { bmin.GetX(), bmax.GetY(), bmin.GetZ() }, + { bmin.GetX(), bmin.GetY(), bmax.GetZ() }, + { bmax.GetX(), bmin.GetY(), bmax.GetZ() }, + { bmax.GetX(), bmax.GetY(), bmax.GetZ() }, + { bmin.GetX(), bmax.GetY(), bmax.GetZ() } + }}; + + // Now add the 12 triangles comprising the 3d box + AddTri(box, 2, 1, 0); + AddTri(box, 3, 2, 0); + + AddTri(box, 7, 2, 3); + AddTri(box, 7, 6, 2); + + AddTri(box, 5, 1, 2); + AddTri(box, 5, 2, 6); + + AddTri(box, 5, 4, 1); + AddTri(box, 4, 0, 1); + + AddTri(box, 4, 6, 7); + AddTri(box, 4, 5, 6); + + AddTri(box, 4, 7, 0); + AddTri(box, 7, 3, 0); +} + +void VoxelHull::AddTri(const std::array, 8>& box, + uint32_t i1, + uint32_t i2, + uint32_t i3) +{ + AddTriangle(box[i1], box[i2], box[i3]); +} + +void VoxelHull::AddTriangle(const VHACD::Vector3& p1, + const VHACD::Vector3& p2, + const VHACD::Vector3& p3) +{ + uint32_t i1 = GetVertexIndex(p1); + uint32_t i2 = GetVertexIndex(p2); + uint32_t i3 = GetVertexIndex(p3); + + m_indices.emplace_back(i1, i2, i3); +} + +SplitAxis VoxelHull::ComputeSplitPlane(uint32_t& location) +{ + SplitAxis ret = SplitAxis::X_AXIS_NEGATIVE; + + VHACD::Vector3 d = m_2 - m_1; + + if ( d.GetX() >= d.GetY() && d.GetX() >= d.GetZ() ) + { + ret = SplitAxis::X_AXIS_NEGATIVE; + location = (m_2.GetX() + 1 + m_1.GetX()) / 2; + uint32_t edgeLoc; + if ( m_params.m_findBestPlane && FindConcavityX(edgeLoc) ) + { + location = edgeLoc; + } + } + else if ( d.GetY() >= d.GetX() && d.GetY() >= d.GetZ() ) + { + ret = SplitAxis::Y_AXIS_NEGATIVE; + location = (m_2.GetY() + 1 + m_1.GetY()) / 2; + uint32_t edgeLoc; + if ( m_params.m_findBestPlane && FindConcavityY(edgeLoc) ) + { + location = edgeLoc; + } + } + else + { + ret = SplitAxis::Z_AXIS_NEGATIVE; + location = (m_2.GetZ() + 1 + m_1.GetZ()) / 2; + uint32_t edgeLoc; + if ( m_params.m_findBestPlane && FindConcavityZ(edgeLoc) ) + { + location = edgeLoc; + } + } + + return ret; +} + +VHACD::Vect3 VoxelHull::GetPosition(const VHACD::Vector3& ip) const +{ + return GetPoint(ip.GetX(), + ip.GetY(), + ip.GetZ(), + m_voxelScale, + m_voxelAdjust); +} + +double VoxelHull::Raycast(const VHACD::Vector3& p1, + const VHACD::Vector3& p2) const +{ + double ret; + VHACD::Vect3 from = GetPosition(p1); + VHACD::Vect3 to = GetPosition(p2); + + double outT; + double faceSign; + VHACD::Vect3 hitLocation; + if (m_AABBTree.TraceRay(from, to, outT, faceSign, hitLocation)) + { + ret = (from - hitLocation).GetNorm(); + } + else + { + ret = 0; // if it doesn't hit anything, just assign it to zero. 
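+        // A zero result simply contributes nothing to the accumulated edge error in
+        // FindConcavity(), and columns whose total stays at zero are skipped there.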
+ } + + return ret; +} + +bool VoxelHull::FindConcavity(uint32_t idx, + uint32_t& splitLoc) +{ + bool ret = false; + + int32_t d = (m_2[idx] - m_1[idx]) + 1; // The length of the getX axis in voxel space + + uint32_t idx1; + uint32_t idx2; + uint32_t idx3; + switch (idx) + { + case 0: // X + idx1 = 0; + idx2 = 1; + idx3 = 2; + break; + case 1: // Y + idx1 = 1; + idx2 = 0; + idx3 = 2; + break; + case 2: + idx1 = 2; + idx2 = 1; + idx3 = 0; + break; + default: + /* + * To silence uninitialized variable warnings + */ + idx1 = 0; + idx2 = 0; + idx3 = 0; + assert(0 && "findConcavity::idx must be 0, 1, or 2"); + break; + } + + // We will compute the edge error on the XY plane and the XZ plane + // searching for the greatest location of concavity + std::vector edgeError1 = std::vector(d); + std::vector edgeError2 = std::vector(d); + + // Counter of number of voxel samples on the XY plane we have accumulated + uint32_t index1 = 0; + + // Compute Edge Error on the XY plane + for (uint32_t i0 = m_1[idx1]; i0 <= m_2[idx1]; i0++) + { + double errorTotal = 0; + // We now perform a raycast from the sides inward on the XY plane to + // determine the total error (distance of the surface from the sides) + // along this getX position. + for (uint32_t i1 = m_1[idx2]; i1 <= m_2[idx2]; i1++) + { + VHACD::Vector3 p1; + VHACD::Vector3 p2; + switch (idx) + { + case 0: + { + p1 = VHACD::Vector3(i0, i1, m_1.GetZ() - 2); + p2 = VHACD::Vector3(i0, i1, m_2.GetZ() + 2); + break; + } + case 1: + { + p1 = VHACD::Vector3(i1, i0, m_1.GetZ() - 2); + p2 = VHACD::Vector3(i1, i0, m_2.GetZ() + 2); + break; + } + case 2: + { + p1 = VHACD::Vector3(m_1.GetX() - 2, i1, i0); + p2 = VHACD::Vector3(m_2.GetX() + 2, i1, i0); + break; + } + } + + double e1 = Raycast(p1, p2); + double e2 = Raycast(p2, p1); + + errorTotal = errorTotal + e1 + e2; + } + // The total amount of edge error along this voxel location + edgeError1[index1] = errorTotal; + index1++; + } + + // Compute edge error along the XZ plane + uint32_t index2 = 0; + + for (uint32_t i0 = m_1[idx1]; i0 <= m_2[idx1]; i0++) + { + double errorTotal = 0; + + for (uint32_t i1 = m_1[idx3]; i1 <= m_2[idx3]; i1++) + { + VHACD::Vector3 p1; + VHACD::Vector3 p2; + switch (idx) + { + case 0: + { + p1 = VHACD::Vector3(i0, m_1.GetY() - 2, i1); + p2 = VHACD::Vector3(i0, m_2.GetY() + 2, i1); + break; + } + case 1: + { + p1 = VHACD::Vector3(m_1.GetX() - 2, i0, i1); + p2 = VHACD::Vector3(m_2.GetX() + 2, i0, i1); + break; + } + case 2: + { + p1 = VHACD::Vector3(i1, m_1.GetY() - 2, i0); + p2 = VHACD::Vector3(i1, m_2.GetY() + 2, i0); + break; + } + } + + double e1 = Raycast(p1, p2); // raycast from one side to the interior + double e2 = Raycast(p2, p1); // raycast from the other side to the interior + + errorTotal = errorTotal + e1 + e2; + } + edgeError2[index2] = errorTotal; + index2++; + } + + + // we now compute the first derivative to find the greatest spot of concavity on the XY plane + double maxDiff = 0; + uint32_t maxC = 0; + for (uint32_t x = 1; x < index1; x++) + { + if ( edgeError1[x] > 0 && edgeError1[x - 1] > 0 ) + { + double diff = abs(edgeError1[x] - edgeError1[x - 1]); + if ( diff > maxDiff ) + { + maxDiff = diff; + maxC = x-1; + } + } + } + + // Now see if there is a greater concavity on the XZ plane + for (uint32_t x = 1; x < index2; x++) + { + if ( edgeError2[x] > 0 && edgeError2[x - 1] > 0 ) + { + double diff = abs(edgeError2[x] - edgeError2[x - 1]); + if ( diff > maxDiff ) + { + maxDiff = diff; + maxC = x - 1; + } + } + } + + splitLoc = maxC + m_1[idx1]; + + // we do not allow an 
edge split if it is too close to the ends + if ( splitLoc > (m_1[idx1] + 4) + && splitLoc < (m_2[idx1] - 4) ) + { + ret = true; + } + + return ret; +} + +// Finding the greatest area of concavity.. +bool VoxelHull::FindConcavityX(uint32_t& splitLoc) +{ + return FindConcavity(0, splitLoc); +} + +// Finding the greatest area of concavity.. +bool VoxelHull::FindConcavityY(uint32_t& splitLoc) +{ + return FindConcavity(1, splitLoc); +} + +// Finding the greatest area of concavity.. +bool VoxelHull::FindConcavityZ(uint32_t &splitLoc) +{ + return FindConcavity(2, splitLoc); +} + +void VoxelHull::PerformPlaneSplit() +{ + if ( IsComplete() ) + { + } + else + { + uint32_t splitLoc; + SplitAxis axis = ComputeSplitPlane(splitLoc); + switch ( axis ) + { + case SplitAxis::X_AXIS_NEGATIVE: + case SplitAxis::X_AXIS_POSITIVE: + // Split on the getX axis at this split location + m_hullA = std::unique_ptr(new VoxelHull(*this, SplitAxis::X_AXIS_NEGATIVE, splitLoc)); + m_hullB = std::unique_ptr(new VoxelHull(*this, SplitAxis::X_AXIS_POSITIVE, splitLoc)); + break; + case SplitAxis::Y_AXIS_NEGATIVE: + case SplitAxis::Y_AXIS_POSITIVE: + // Split on the 1 axis at this split location + m_hullA = std::unique_ptr(new VoxelHull(*this, SplitAxis::Y_AXIS_NEGATIVE, splitLoc)); + m_hullB = std::unique_ptr(new VoxelHull(*this, SplitAxis::Y_AXIS_POSITIVE, splitLoc)); + break; + case SplitAxis::Z_AXIS_NEGATIVE: + case SplitAxis::Z_AXIS_POSITIVE: + // Split on the getZ axis at this split location + m_hullA = std::unique_ptr(new VoxelHull(*this, SplitAxis::Z_AXIS_NEGATIVE, splitLoc)); + m_hullB = std::unique_ptr(new VoxelHull(*this, SplitAxis::Z_AXIS_POSITIVE, splitLoc)); + break; + } + } +} + +void VoxelHull::SaveVoxelMesh(const SimpleMesh &inputMesh, + bool saveVoxelMesh, + bool saveSourceMesh) +{ + char scratch[512]; + snprintf(scratch, + sizeof(scratch), + "voxel-mesh-%03d.obj", + m_index); + FILE *fph = fopen(scratch, + "wb"); + if ( fph ) + { + uint32_t baseIndex = 1; + if ( saveVoxelMesh ) + { + WriteOBJ(fph, + m_vertices, + m_indices, + baseIndex); + baseIndex += uint32_t(m_vertices.size()); + } + if ( saveSourceMesh ) + { + WriteOBJ(fph, + inputMesh.m_vertices, + inputMesh.m_indices, + baseIndex); + } + fclose(fph); + } +} + +void VoxelHull::SaveOBJ(const char* fname, + const VoxelHull* h) +{ + FILE *fph = fopen(fname,"wb"); + if ( fph ) + { + uint32_t baseIndex = 1; + WriteOBJ(fph, + m_vertices, + m_indices, + baseIndex); + + baseIndex += uint32_t(m_vertices.size()); + + WriteOBJ(fph, + h->m_vertices, + h->m_indices, + baseIndex); + fclose(fph); + } +} + +void VoxelHull::SaveOBJ(const char *fname) +{ + FILE *fph = fopen(fname, "wb"); + if ( fph ) + { + printf("Saving '%s' with %d vertices and %d triangles\n", + fname, + uint32_t(m_vertices.size()), + uint32_t(m_indices.size())); + WriteOBJ(fph, + m_vertices, + m_indices, + 1); + fclose(fph); + } +} + +void VoxelHull::WriteOBJ(FILE* fph, + const std::vector& vertices, + const std::vector& indices, + uint32_t baseIndex) +{ + if (!fph) + { + return; + } + + for (size_t i = 0; i < vertices.size(); ++i) + { + const VHACD::Vertex& v = vertices[i]; + fprintf(fph, "v %0.9f %0.9f %0.9f\n", + v.mX, + v.mY, + v.mZ); + } + + for (size_t i = 0; i < indices.size(); ++i) + { + const VHACD::Triangle& t = indices[i]; + fprintf(fph, "f %d %d %d\n", + t.mI0 + baseIndex, + t.mI1 + baseIndex, + t.mI2 + baseIndex); + } +} + +class VHACDImpl; + +// This class represents a single task to compute the volume error +// of two convex hulls combined +class CostTask +{ +public: + VHACDImpl* 
m_this{ nullptr }; + IVHACD::ConvexHull* m_hullA{ nullptr }; + IVHACD::ConvexHull* m_hullB{ nullptr }; + double m_concavity{ 0 }; // concavity of the two combined + std::future m_future; +}; + +class HullPair +{ +public: + HullPair() = default; + HullPair(uint32_t hullA, + uint32_t hullB, + double concavity); + + bool operator<(const HullPair &h) const; + + uint32_t m_hullA{ 0 }; + uint32_t m_hullB{ 0 }; + double m_concavity{ 0 }; +}; + +HullPair::HullPair(uint32_t hullA, + uint32_t hullB, + double concavity) + : m_hullA(hullA) + , m_hullB(hullB) + , m_concavity(concavity) +{ +} + +bool HullPair::operator<(const HullPair &h) const +{ + return m_concavity > h.m_concavity ? true : false; +} + +// void jobCallback(void* userPtr); + +class VHACDImpl : public IVHACD, public VHACDCallbacks +{ + // Don't consider more than 100,000 convex hulls. + static constexpr uint32_t MaxConvexHullFragments{ 100000 }; +public: + VHACDImpl() = default; + + /* + * Overrides VHACD::IVHACD + */ + ~VHACDImpl() override + { + Clean(); + } + + void Cancel() override final; + + bool Compute(const float* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t countTriangles, + const Parameters& params) override final; + + bool Compute(const double* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t countTriangles, + const Parameters& params) override final; + + uint32_t GetNConvexHulls() const override final; + + bool GetConvexHull(const uint32_t index, + ConvexHull& ch) const override final; + + void Clean() override final; // release internally allocated memory + + void Release() override final; + + // Will compute the center of mass of the convex hull decomposition results and return it + // in 'centerOfMass'. Returns false if the center of mass could not be computed. + bool ComputeCenterOfMass(double centerOfMass[3]) const override final; + + // In synchronous mode (non-multi-threaded) the state is always 'ready' + // In asynchronous mode, this returns true if the background thread is not still actively computing + // a new solution. In an asynchronous config the 'IsReady' call will report any update or log + // messages in the caller's current thread. + bool IsReady(void) const override final; + + /** + * At the request of LegionFu : out_look@foxmail.com + * This method will return which convex hull is closest to the source position. + * You can use this method to figure out, for example, which vertices in the original + * source mesh are best associated with which convex hull. + * + * @param pos : The input 3d position to test against + * + * @return : Returns which convex hull this position is closest to. + */ + uint32_t findNearestConvexHull(const double pos[3], + double& distanceToHull) override final; + +// private: + bool Compute(const std::vector& points, + const std::vector& triangles, + const Parameters& params); + + // Take the source position, normalize it, and then convert it into an index position + uint32_t GetIndex(VHACD::VertexIndex& vi, + const VHACD::Vertex& p); + + // This copies the input mesh while scaling the input positions + // to fit into a normalized unit cube. It also re-indexes all of the + // vertex positions in case they weren't clean coming in. 
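+    // Concretely (see the implementation): m_center = (bmax + bmin) / 2 and m_scale is the
+    // largest extent of (bmax - bmin), so each input vertex is mapped as
+    //     p' = (p - m_center) * m_recipScale, with m_recipScale = 1 / m_scale,
+    // and ScaleOutputConvexHull() applies the inverse, p = p' * m_scale + m_center, when
+    // the results are emitted.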
+ void CopyInputMesh(const std::vector& points, + const std::vector& triangles); + + void ScaleOutputConvexHull(ConvexHull &ch); + + void AddCostToPriorityQueue(CostTask& task); + + void ReleaseConvexHull(ConvexHull* ch); + + void PerformConvexDecomposition(); + + double ComputeConvexHullVolume(const ConvexHull& sm); + + double ComputeVolume4(const VHACD::Vect3& a, + const VHACD::Vect3& b, + const VHACD::Vect3& c, + const VHACD::Vect3& d); + + double ComputeConcavity(double volumeSeparate, + double volumeCombined, + double volumeMesh); + + // See if we can compute the cost without having to actually merge convex hulls. + // If the axis aligned bounding boxes (slightly inflated) of the two convex hulls + // do not intersect, then we don't need to actually compute the merged convex hull + // volume. + bool DoFastCost(CostTask& mt); + + void PerformMergeCostTask(CostTask& mt); + + ConvexHull* ComputeReducedConvexHull(const ConvexHull& ch, + uint32_t maxVerts, + bool projectHullVertices); + + // Take the points in convex hull A and the points in convex hull B and generate + // a new convex hull on the combined set of points. + // Once completed, we create a SimpleMesh instance to hold the triangle mesh + // and we compute an inflated AABB for it. + ConvexHull* ComputeCombinedConvexHull(const ConvexHull& sm1, + const ConvexHull& sm2); + + + ConvexHull* GetHull(uint32_t index); + + bool RemoveHull(uint32_t index); + + ConvexHull* CopyConvexHull(const ConvexHull& source); + + const char* GetStageName(Stages stage) const; + + /* + * Overrides VHACD::VHACDCallbacks + */ + void ProgressUpdate(Stages stage, + double stageProgress, + const char* operation) override final; + + bool IsCanceled() const override final; + + std::atomic m_canceled{ false }; + Parameters m_params; // Convex decomposition parameters + + std::vector m_convexHulls; // Finalized convex hulls + std::vector> m_voxelHulls; // completed voxel hulls + std::vector> m_pendingHulls; + + std::vector> m_trees; + VHACD::AABBTree m_AABBTree; + VHACD::Volume m_voxelize; + VHACD::Vect3 m_center; + double m_scale{ double(1.0) }; + double m_recipScale{ double(1.0) }; + SimpleMesh m_inputMesh; // re-indexed and normalized input mesh + std::vector m_vertices; + std::vector m_indices; + + double m_overallHullVolume{ double(0.0) }; + double m_voxelScale{ double(0.0) }; + double m_voxelHalfScale{ double(0.0) }; + VHACD::Vect3 m_voxelBmin; + VHACD::Vect3 m_voxelBmax; + uint32_t m_meshId{ 0 }; + std::priority_queue m_hullPairQueue; +#if !VHACD_DISABLE_THREADING + std::unique_ptr m_threadPool{ nullptr }; +#endif + std::unordered_map m_hulls; + + double m_overallProgress{ double(0.0) }; + double m_stageProgress{ double(0.0) }; + double m_operationProgress{ double(0.0) }; +}; + +void VHACDImpl::Cancel() +{ + m_canceled = true; +} + +bool VHACDImpl::Compute(const float* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t countTriangles, + const Parameters& params) +{ + std::vector v; + v.reserve(countPoints); + for (uint32_t i = 0; i < countPoints; ++i) + { + v.emplace_back(points[i * 3 + 0], + points[i * 3 + 1], + points[i * 3 + 2]); + } + + std::vector t; + t.reserve(countTriangles); + for (uint32_t i = 0; i < countTriangles; ++i) + { + t.emplace_back(triangles[i * 3 + 0], + triangles[i * 3 + 1], + triangles[i * 3 + 2]); + } + + return Compute(v, t, params); +} + +bool VHACDImpl::Compute(const double* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t 
countTriangles, + const Parameters& params) +{ + std::vector v; + v.reserve(countPoints); + for (uint32_t i = 0; i < countPoints; ++i) + { + v.emplace_back(points[i * 3 + 0], + points[i * 3 + 1], + points[i * 3 + 2]); + } + + std::vector t; + t.reserve(countTriangles); + for (uint32_t i = 0; i < countTriangles; ++i) + { + t.emplace_back(triangles[i * 3 + 0], + triangles[i * 3 + 1], + triangles[i * 3 + 2]); + } + + return Compute(v, t, params); +} + +uint32_t VHACDImpl::GetNConvexHulls() const +{ + return uint32_t(m_convexHulls.size()); +} + +bool VHACDImpl::GetConvexHull(const uint32_t index, + ConvexHull& ch) const +{ + bool ret = false; + + if ( index < uint32_t(m_convexHulls.size() )) + { + ch = *m_convexHulls[index]; + ret = true; + } + + return ret; +} + +void VHACDImpl::Clean() +{ +#if !VHACD_DISABLE_THREADING + m_threadPool = nullptr; +#endif + + m_trees.clear(); + + for (auto& ch : m_convexHulls) + { + ReleaseConvexHull(ch); + } + m_convexHulls.clear(); + + for (auto& ch : m_hulls) + { + ReleaseConvexHull(ch.second); + } + m_hulls.clear(); + + m_voxelHulls.clear(); + + m_pendingHulls.clear(); + + m_vertices.clear(); + m_indices.clear(); +} + +void VHACDImpl::Release() +{ + delete this; +} + +bool VHACDImpl::ComputeCenterOfMass(double centerOfMass[3]) const +{ + bool ret = false; + + return ret; +} + +bool VHACDImpl::IsReady() const +{ + return true; +} + +uint32_t VHACDImpl::findNearestConvexHull(const double pos[3], + double& distanceToHull) +{ + uint32_t ret = 0; // The default return code is zero + + uint32_t hullCount = GetNConvexHulls(); + distanceToHull = 0; + // First, make sure that we have valid and completed results + if ( hullCount ) + { + // See if we already have AABB trees created for each convex hull + if ( m_trees.empty() ) + { + // For each convex hull, we generate an AABB tree for fast closest point queries + for (uint32_t i = 0; i < hullCount; i++) + { + VHACD::IVHACD::ConvexHull ch; + GetConvexHull(i,ch); + // Pass the triangle mesh to create an AABB tree instance based on it. 
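+                // Each tree is built once here and cached in m_trees, so later calls skip
+                // straight to the closest-point queries below.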
+ m_trees.emplace_back(new AABBTree(ch.m_points, + ch.m_triangles)); + } + } + // We now compute the closest point to each convex hull and save the nearest one + double closest = 1e99; + for (uint32_t i = 0; i < hullCount; i++) + { + std::unique_ptr& t = m_trees[i]; + if ( t ) + { + VHACD::Vect3 closestPoint; + VHACD::Vect3 position(pos[0], + pos[1], + pos[2]); + if ( t->GetClosestPointWithinDistance(position, 1e99, closestPoint)) + { + VHACD::Vect3 d = position - closestPoint; + double distanceSquared = d.GetNormSquared(); + if ( distanceSquared < closest ) + { + closest = distanceSquared; + ret = i; + } + } + } + } + distanceToHull = sqrt(closest); // compute the distance to the nearest convex hull + } + + return ret; +} + +bool VHACDImpl::Compute(const std::vector& points, + const std::vector& triangles, + const Parameters& params) +{ + bool ret = false; + + m_params = params; + m_canceled = false; + + Clean(); // release any previous results +#if !VHACD_DISABLE_THREADING + if ( m_params.m_asyncACD ) + { + m_threadPool = std::unique_ptr(new ThreadPool(8)); + } +#endif + CopyInputMesh(points, + triangles); + if ( !m_canceled ) + { + // We now recursively perform convex decomposition until complete + PerformConvexDecomposition(); + } + + if ( m_canceled ) + { + Clean(); + ret = false; + if ( m_params.m_logger ) + { + m_params.m_logger->Log("VHACD operation canceled before it was complete."); + } + } + else + { + ret = true; + } +#if !VHACD_DISABLE_THREADING + m_threadPool = nullptr; +#endif + return ret; +} + +uint32_t VHACDImpl::GetIndex(VHACD::VertexIndex& vi, + const VHACD::Vertex& p) +{ + VHACD::Vect3 pos = (VHACD::Vect3(p) - m_center) * m_recipScale; + bool newPos; + uint32_t ret = vi.GetIndex(pos, + newPos); + return ret; +} + +void VHACDImpl::CopyInputMesh(const std::vector& points, + const std::vector& triangles) +{ + m_vertices.clear(); + m_indices.clear(); + m_indices.reserve(triangles.size()); + + // First we must find the bounding box of this input vertices and normalize them into a unit-cube + VHACD::Vect3 bmin( FLT_MAX); + VHACD::Vect3 bmax(-FLT_MAX); + ProgressUpdate(Stages::COMPUTE_BOUNDS_OF_INPUT_MESH, + 0, + "ComputingBounds"); + for (uint32_t i = 0; i < points.size(); i++) + { + const VHACD::Vertex& p = points[i]; + + bmin = bmin.CWiseMin(p); + bmax = bmax.CWiseMax(p); + } + ProgressUpdate(Stages::COMPUTE_BOUNDS_OF_INPUT_MESH, + 100, + "ComputingBounds"); + + m_center = (bmax + bmin) * double(0.5); + + VHACD::Vect3 scale = bmax - bmin; + m_scale = scale.MaxCoeff(); + + m_recipScale = m_scale > double(0.0) ? 
double(1.0) / m_scale : double(0.0); + + { + VHACD::VertexIndex vi = VHACD::VertexIndex(double(0.001), false); + + uint32_t dcount = 0; + + for (uint32_t i = 0; i < triangles.size() && !m_canceled; ++i) + { + const VHACD::Triangle& t = triangles[i]; + const VHACD::Vertex& p1 = points[t.mI0]; + const VHACD::Vertex& p2 = points[t.mI1]; + const VHACD::Vertex& p3 = points[t.mI2]; + + uint32_t i1 = GetIndex(vi, p1); + uint32_t i2 = GetIndex(vi, p2); + uint32_t i3 = GetIndex(vi, p3); + + if ( i1 == i2 || i1 == i3 || i2 == i3 ) + { + dcount++; + } + else + { + m_indices.emplace_back(i1, i2, i3); + } + } + + if ( dcount ) + { + if ( m_params.m_logger ) + { + char scratch[512]; + snprintf(scratch, + sizeof(scratch), + "Skipped %d degenerate triangles", dcount); + m_params.m_logger->Log(scratch); + } + } + + m_vertices = vi.TakeVertices(); + } + + // Create the raycast mesh + if ( !m_canceled ) + { + ProgressUpdate(Stages::CREATE_RAYCAST_MESH, + 0, + "Building RaycastMesh"); + m_AABBTree = VHACD::AABBTree(m_vertices, + m_indices); + ProgressUpdate(Stages::CREATE_RAYCAST_MESH, + 100, + "RaycastMesh completed"); + } + if ( !m_canceled ) + { + ProgressUpdate(Stages::VOXELIZING_INPUT_MESH, + 0, + "Voxelizing Input Mesh"); + m_voxelize = VHACD::Volume(); + m_voxelize.Voxelize(m_vertices, + m_indices, + m_params.m_resolution, + m_params.m_fillMode, + m_AABBTree); + m_voxelScale = m_voxelize.GetScale(); + m_voxelHalfScale = m_voxelScale * double(0.5); + m_voxelBmin = m_voxelize.GetBounds().GetMin(); + m_voxelBmax = m_voxelize.GetBounds().GetMax(); + ProgressUpdate(Stages::VOXELIZING_INPUT_MESH, + 100, + "Voxelization complete"); + } + + m_inputMesh.m_vertices = m_vertices; + m_inputMesh.m_indices = m_indices; + if ( !m_canceled ) + { + ProgressUpdate(Stages::BUILD_INITIAL_CONVEX_HULL, + 0, + "Build initial ConvexHull"); + std::unique_ptr vh = std::unique_ptr(new VoxelHull(m_voxelize, + m_params, + this)); + if ( vh->m_convexHull ) + { + m_overallHullVolume = vh->m_convexHull->m_volume; + } + m_pendingHulls.push_back(std::move(vh)); + ProgressUpdate(Stages::BUILD_INITIAL_CONVEX_HULL, + 100, + "Initial ConvexHull complete"); + } +} + +void VHACDImpl::ScaleOutputConvexHull(ConvexHull& ch) +{ + for (uint32_t i = 0; i < ch.m_points.size(); i++) + { + VHACD::Vect3 p = ch.m_points[i]; + p = (p * m_scale) + m_center; + ch.m_points[i] = p; + } + ch.m_volume = ComputeConvexHullVolume(ch); // get the combined volume + VHACD::BoundsAABB b(ch.m_points); + ch.mBmin = b.GetMin(); + ch.mBmax = b.GetMax(); + ComputeCentroid(ch.m_points, + ch.m_triangles, + ch.m_center); +} + +void VHACDImpl::AddCostToPriorityQueue(CostTask& task) +{ + HullPair hp(task.m_hullA->m_meshId, + task.m_hullB->m_meshId, + task.m_concavity); + m_hullPairQueue.push(hp); +} + +void VHACDImpl::ReleaseConvexHull(ConvexHull* ch) +{ + if ( ch ) + { + delete ch; + } +} + +void jobCallback(std::unique_ptr& userPtr) +{ + userPtr->PerformPlaneSplit(); +} + +void computeMergeCostTask(CostTask& ptr) +{ + ptr.m_this->PerformMergeCostTask(ptr); +} + +void VHACDImpl::PerformConvexDecomposition() +{ + { + ScopedTime st("Convex Decomposition", + m_params.m_logger); + double maxHulls = pow(2, m_params.m_maxRecursionDepth); + // We recursively split convex hulls until we can + // no longer recurse further. 
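+        // A hull stops recursing once VoxelHull::IsComplete() is satisfied: its volume
+        // error falls below m_minimumVolumePercentErrorAllowed, it reaches
+        // m_maxRecursionDepth, or its voxel extents shrink to m_minEdgeLength or less.
+        // maxHulls (2^m_maxRecursionDepth) is only used to scale the progress reports.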
+ Timer t; + + while ( !m_pendingHulls.empty() && !m_canceled ) + { + size_t count = m_pendingHulls.size() + m_voxelHulls.size(); + double e = t.PeekElapsedSeconds(); + if ( e >= double(0.1) ) + { + t.Reset(); + double stageProgress = (double(count) * double(100.0)) / maxHulls; + ProgressUpdate(Stages::PERFORMING_DECOMPOSITION, + stageProgress, + "Performing recursive decomposition of convex hulls"); + } + // First we make a copy of the hulls we are processing + std::vector> oldList = std::move(m_pendingHulls); + // For each hull we want to split, we either + // immediately perform the plane split or we post it as + // a job to be performed in a background thread + std::vector> futures(oldList.size()); + uint32_t futureCount = 0; + for (auto& i : oldList) + { + if ( i->IsComplete() || count > MaxConvexHullFragments ) + { + } + else + { +#if !VHACD_DISABLE_THREADING + if ( m_threadPool ) + { + futures[futureCount] = m_threadPool->enqueue([&i] + { + jobCallback(i); + }); + futureCount++; + } + else +#endif + { + i->PerformPlaneSplit(); + } + } + } + // Wait for any outstanding jobs to complete in the background threads + if ( futureCount ) + { + for (uint32_t i = 0; i < futureCount; i++) + { + futures[i].get(); + } + } + // Now, we rebuild the pending convex hulls list by + // adding the two children to the output list if + // we need to recurse them further + for (auto& vh : oldList) + { + if ( vh->IsComplete() || count > MaxConvexHullFragments ) + { + if ( vh->m_convexHull ) + { + m_voxelHulls.push_back(std::move(vh)); + } + } + else + { + if ( vh->m_hullA ) + { + m_pendingHulls.push_back(std::move(vh->m_hullA)); + } + if ( vh->m_hullB ) + { + m_pendingHulls.push_back(std::move(vh->m_hullB)); + } + } + } + } + } + + if ( !m_canceled ) + { + // Give each convex hull a unique guid + m_meshId = 0; + m_hulls.clear(); + + // Build the convex hull id map + std::vector hulls; + + ProgressUpdate(Stages::INITIALIZING_CONVEX_HULLS_FOR_MERGING, + 0, + "Initializing ConvexHulls"); + for (auto& vh : m_voxelHulls) + { + if ( m_canceled ) + { + break; + } + ConvexHull* ch = CopyConvexHull(*vh->m_convexHull); + m_meshId++; + ch->m_meshId = m_meshId; + m_hulls[m_meshId] = ch; + // Compute the volume of the convex hull + ch->m_volume = ComputeConvexHullVolume(*ch); + // Compute the AABB of the convex hull + VHACD::BoundsAABB b = VHACD::BoundsAABB(ch->m_points).Inflate(double(0.1)); + ch->mBmin = b.GetMin(); + ch->mBmax = b.GetMax(); + + ComputeCentroid(ch->m_points, + ch->m_triangles, + ch->m_center); + + hulls.push_back(ch); + } + ProgressUpdate(Stages::INITIALIZING_CONVEX_HULLS_FOR_MERGING, + 100, + "ConvexHull initialization complete"); + + m_voxelHulls.clear(); + + // here we merge convex hulls as needed until the match the + // desired maximum hull count. 
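+        // Merging is driven by a cost matrix of (n^2 - n) / 2 hull pairs. A pair's cost is
+        // its concavity, |V_a + V_b - V_combined| / V_mesh (see ComputeConcavity()), and
+        // HullPair::operator< is deliberately inverted so the priority queue always
+        // surfaces the cheapest merge first.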
+ size_t hullCount = hulls.size(); + + if ( hullCount > m_params.m_maxConvexHulls && !m_canceled) + { + size_t costMatrixSize = ((hullCount * hullCount) - hullCount) >> 1; + std::vector tasks; + tasks.reserve(costMatrixSize); + + ScopedTime st("Computing the Cost Matrix", + m_params.m_logger); + // First thing we need to do is compute the cost matrix + // This is computed as the volume error of any two convex hulls + // combined + ProgressUpdate(Stages::COMPUTING_COST_MATRIX, + 0, + "Computing Hull Merge Cost Matrix"); + for (size_t i = 1; i < hullCount && !m_canceled; i++) + { + ConvexHull* chA = hulls[i]; + + for (size_t j = 0; j < i && !m_canceled; j++) + { + ConvexHull* chB = hulls[j]; + + CostTask ct; + ct.m_hullA = chA; + ct.m_hullB = chB; + ct.m_this = this; + + if ( DoFastCost(ct) ) + { + } + else + { + tasks.push_back(std::move(ct)); + CostTask* task = &tasks.back(); +#if !VHACD_DISABLE_THREADING + if ( m_threadPool ) + { + task->m_future = m_threadPool->enqueue([task] + { + computeMergeCostTask(*task); + }); + } +#endif + } + } + } + + if ( !m_canceled ) + { +#if !VHACD_DISABLE_THREADING + if ( m_threadPool ) + { + for (CostTask& task : tasks) + { + task.m_future.get(); + } + + for (CostTask& task : tasks) + { + AddCostToPriorityQueue(task); + } + } + else +#endif + { + for (CostTask& task : tasks) + { + PerformMergeCostTask(task); + AddCostToPriorityQueue(task); + } + } + ProgressUpdate(Stages::COMPUTING_COST_MATRIX, + 100, + "Finished cost matrix"); + } + + if ( !m_canceled ) + { + ScopedTime stMerging("Merging Convex Hulls", + m_params.m_logger); + Timer t; + // Now that we know the cost to merge each hull, we can begin merging them. + bool cancel = false; + + uint32_t maxMergeCount = uint32_t(m_hulls.size()) - m_params.m_maxConvexHulls; + uint32_t startCount = uint32_t(m_hulls.size()); + + while ( !cancel + && m_hulls.size() > m_params.m_maxConvexHulls + && !m_hullPairQueue.empty() + && !m_canceled) + { + double e = t.PeekElapsedSeconds(); + if ( e >= double(0.1) ) + { + t.Reset(); + uint32_t hullsProcessed = startCount - uint32_t(m_hulls.size() ); + double stageProgress = double(hullsProcessed * 100) / double(maxMergeCount); + ProgressUpdate(Stages::MERGING_CONVEX_HULLS, + stageProgress, + "Merging Convex Hulls"); + } + + HullPair hp = m_hullPairQueue.top(); + m_hullPairQueue.pop(); + + // It is entirely possible that the hull pair queue can + // have references to convex hulls that are no longer valid + // because they were previously merged. So we check for this + // and if either hull referenced in this pair no longer + // exists, then we skip it. 
+ + // Look up this pair of hulls by ID + ConvexHull* ch1 = GetHull(hp.m_hullA); + ConvexHull* ch2 = GetHull(hp.m_hullB); + + // If both hulls are still valid, then we merge them, delete the old + // two hulls and recompute the cost matrix for the new combined hull + // we have created + if ( ch1 && ch2 ) + { + // This is the convex hull which results from combining the + // vertices in the two source hulls + ConvexHull* combinedHull = ComputeCombinedConvexHull(*ch1, + *ch2); + // The two old convex hulls are going to get removed + RemoveHull(hp.m_hullA); + RemoveHull(hp.m_hullB); + + m_meshId++; + combinedHull->m_meshId = m_meshId; + tasks.clear(); + tasks.reserve(m_hulls.size()); + + // Compute the cost between this new merged hull + // and all existing convex hulls and then + // add that to the priority queue + for (auto& i : m_hulls) + { + if ( m_canceled ) + { + break; + } + ConvexHull* secondHull = i.second; + CostTask ct; + ct.m_hullA = combinedHull; + ct.m_hullB = secondHull; + ct.m_this = this; + if ( DoFastCost(ct) ) + { + } + else + { + tasks.push_back(std::move(ct)); + } + } + m_hulls[combinedHull->m_meshId] = combinedHull; + // See how many merge cost tasks were posted + // If there are 8 or more and we are running asynchronously, then do them that way. +#if !VHACD_DISABLE_THREADING + if ( m_threadPool && tasks.size() >= 2) + { + for (CostTask& task : tasks) + { + task.m_future = m_threadPool->enqueue([&task] + { + computeMergeCostTask(task); + }); + } + + for (CostTask& task : tasks) + { + task.m_future.get(); + } + } + else +#endif + { + for (CostTask& task : tasks) + { + PerformMergeCostTask(task); + } + } + + for (CostTask& task : tasks) + { + AddCostToPriorityQueue(task); + } + } + } + // Ok...once we are done, we copy the results! 
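+            // Finalization: every surviving hull is reduced to at most
+            // m_maxNumVerticesPerCH vertices (optionally shrink-wrapped back onto the
+            // source mesh), rescaled into the original coordinate frame via
+            // ScaleOutputConvexHull(), given a fresh mesh id, and moved into m_convexHulls.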
+ m_meshId -= 0; + ProgressUpdate(Stages::FINALIZING_RESULTS, + 0, + "Finalizing results"); + for (auto& i : m_hulls) + { + if ( m_canceled ) + { + break; + } + ConvexHull* ch = i.second; + // We now must reduce the convex hull + if ( ch->m_points.size() > m_params.m_maxNumVerticesPerCH || m_params.m_shrinkWrap) + { + ConvexHull* reduce = ComputeReducedConvexHull(*ch, + m_params.m_maxNumVerticesPerCH, + m_params.m_shrinkWrap); + ReleaseConvexHull(ch); + ch = reduce; + } + ScaleOutputConvexHull(*ch); + ch->m_meshId = m_meshId; + m_meshId++; + m_convexHulls.push_back(ch); + } + m_hulls.clear(); // since the hulls were moved into the output list, we don't need to delete them from this container + ProgressUpdate(Stages::FINALIZING_RESULTS, + 100, + "Finalized results complete"); + } + } + else + { + ProgressUpdate(Stages::FINALIZING_RESULTS, + 0, + "Finalizing results"); + m_meshId = 0; + for (auto& ch : hulls) + { + // We now must reduce the convex hull + if ( ch->m_points.size() > m_params.m_maxNumVerticesPerCH || m_params.m_shrinkWrap ) + { + ConvexHull* reduce = ComputeReducedConvexHull(*ch, + m_params.m_maxNumVerticesPerCH, + m_params.m_shrinkWrap); + ReleaseConvexHull(ch); + ch = reduce; + } + ScaleOutputConvexHull(*ch); + ch->m_meshId = m_meshId; + m_meshId++; + m_convexHulls.push_back(ch); + } + m_hulls.clear(); + ProgressUpdate(Stages::FINALIZING_RESULTS, + 100, + "Finalized results"); + } + } +} + +double VHACDImpl::ComputeConvexHullVolume(const ConvexHull& sm) +{ + double totalVolume = 0; + VHACD::Vect3 bary(0, 0, 0); + for (uint32_t i = 0; i < sm.m_points.size(); i++) + { + VHACD::Vect3 p(sm.m_points[i]); + bary += p; + } + bary /= double(sm.m_points.size()); + + for (uint32_t i = 0; i < sm.m_triangles.size(); i++) + { + uint32_t i1 = sm.m_triangles[i].mI0; + uint32_t i2 = sm.m_triangles[i].mI1; + uint32_t i3 = sm.m_triangles[i].mI2; + + VHACD::Vect3 ver0(sm.m_points[i1]); + VHACD::Vect3 ver1(sm.m_points[i2]); + VHACD::Vect3 ver2(sm.m_points[i3]); + + totalVolume += ComputeVolume4(ver0, + ver1, + ver2, + bary); + + } + totalVolume = totalVolume / double(6.0); + return totalVolume; +} + +double VHACDImpl::ComputeVolume4(const VHACD::Vect3& a, + const VHACD::Vect3& b, + const VHACD::Vect3& c, + const VHACD::Vect3& d) +{ + VHACD::Vect3 ad = a - d; + VHACD::Vect3 bd = b - d; + VHACD::Vect3 cd = c - d; + VHACD::Vect3 bcd = bd.Cross(cd); + double dot = ad.Dot(bcd); + return dot; +} + +double VHACDImpl::ComputeConcavity(double volumeSeparate, + double volumeCombined, + double volumeMesh) +{ + return fabs(volumeSeparate - volumeCombined) / volumeMesh; +} + +bool VHACDImpl::DoFastCost(CostTask& mt) +{ + bool ret = false; + + ConvexHull* ch1 = mt.m_hullA; + ConvexHull* ch2 = mt.m_hullB; + + VHACD::BoundsAABB ch1b(ch1->mBmin, + ch1->mBmax); + VHACD::BoundsAABB ch2b(ch2->mBmin, + ch2->mBmax); + if (!ch1b.Intersects(ch2b)) + { + VHACD::BoundsAABB b = ch1b.Union(ch2b); + + double combinedVolume = b.Volume(); + double concavity = ComputeConcavity(ch1->m_volume + ch2->m_volume, + combinedVolume, + m_overallHullVolume); + HullPair hp(ch1->m_meshId, + ch2->m_meshId, + concavity); + m_hullPairQueue.push(hp); + ret = true; + } + return ret; +} + +void VHACDImpl::PerformMergeCostTask(CostTask& mt) +{ + ConvexHull* ch1 = mt.m_hullA; + ConvexHull* ch2 = mt.m_hullB; + + double volume1 = ch1->m_volume; + double volume2 = ch2->m_volume; + + ConvexHull* combined = ComputeCombinedConvexHull(*ch1, + *ch2); // Build the combined convex hull + double combinedVolume = ComputeConvexHullVolume(*combined); // get 
the combined volume + mt.m_concavity = ComputeConcavity(volume1 + volume2, + combinedVolume, + m_overallHullVolume); + ReleaseConvexHull(combined); +} + +IVHACD::ConvexHull* VHACDImpl::ComputeReducedConvexHull(const ConvexHull& ch, + uint32_t maxVerts, + bool projectHullVertices) +{ + SimpleMesh sourceConvexHull; + + sourceConvexHull.m_vertices = ch.m_points; + sourceConvexHull.m_indices = ch.m_triangles; + + ShrinkWrap(sourceConvexHull, + m_AABBTree, + maxVerts, + m_voxelScale, + projectHullVertices); + + ConvexHull *ret = new ConvexHull; + + ret->m_points = sourceConvexHull.m_vertices; + ret->m_triangles = sourceConvexHull.m_indices; + + VHACD::BoundsAABB b = VHACD::BoundsAABB(ret->m_points).Inflate(double(0.1)); + ret->mBmin = b.GetMin(); + ret->mBmax = b.GetMax(); + ComputeCentroid(ret->m_points, + ret->m_triangles, + ret->m_center); + + ret->m_volume = ComputeConvexHullVolume(*ret); + + // Return the convex hull + return ret; +} + +IVHACD::ConvexHull* VHACDImpl::ComputeCombinedConvexHull(const ConvexHull& sm1, + const ConvexHull& sm2) +{ + uint32_t vcount = uint32_t(sm1.m_points.size() + sm2.m_points.size()); // Total vertices from both hulls + std::vector vertices(vcount); + auto it = std::copy(sm1.m_points.begin(), + sm1.m_points.end(), + vertices.begin()); + std::copy(sm2.m_points.begin(), + sm2.m_points.end(), + it); + + VHACD::QuickHull qh; + qh.ComputeConvexHull(vertices, + vcount); + + ConvexHull* ret = new ConvexHull; + ret->m_points = qh.GetVertices(); + ret->m_triangles = qh.GetIndices(); + + ret->m_volume = ComputeConvexHullVolume(*ret); + + VHACD::BoundsAABB b = VHACD::BoundsAABB(qh.GetVertices()).Inflate(double(0.1)); + ret->mBmin = b.GetMin(); + ret->mBmax = b.GetMax(); + ComputeCentroid(ret->m_points, + ret->m_triangles, + ret->m_center); + + // Return the convex hull + return ret; +} + +IVHACD::ConvexHull* VHACDImpl::GetHull(uint32_t index) +{ + ConvexHull* ret = nullptr; + + auto found = m_hulls.find(index); + if ( found != m_hulls.end() ) + { + ret = found->second; + } + + return ret; +} + +bool VHACDImpl::RemoveHull(uint32_t index) +{ + bool ret = false; + auto found = m_hulls.find(index); + if ( found != m_hulls.end() ) + { + ret = true; + ReleaseConvexHull(found->second); + m_hulls.erase(found); + } + return ret; +} + +IVHACD::ConvexHull* VHACDImpl::CopyConvexHull(const ConvexHull& source) +{ + ConvexHull *ch = new ConvexHull; + *ch = source; + + return ch; +} + +const char* VHACDImpl::GetStageName(Stages stage) const +{ + const char *ret = "unknown"; + switch ( stage ) + { + case Stages::COMPUTE_BOUNDS_OF_INPUT_MESH: + ret = "COMPUTE_BOUNDS_OF_INPUT_MESH"; + break; + case Stages::REINDEXING_INPUT_MESH: + ret = "REINDEXING_INPUT_MESH"; + break; + case Stages::CREATE_RAYCAST_MESH: + ret = "CREATE_RAYCAST_MESH"; + break; + case Stages::VOXELIZING_INPUT_MESH: + ret = "VOXELIZING_INPUT_MESH"; + break; + case Stages::BUILD_INITIAL_CONVEX_HULL: + ret = "BUILD_INITIAL_CONVEX_HULL"; + break; + case Stages::PERFORMING_DECOMPOSITION: + ret = "PERFORMING_DECOMPOSITION"; + break; + case Stages::INITIALIZING_CONVEX_HULLS_FOR_MERGING: + ret = "INITIALIZING_CONVEX_HULLS_FOR_MERGING"; + break; + case Stages::COMPUTING_COST_MATRIX: + ret = "COMPUTING_COST_MATRIX"; + break; + case Stages::MERGING_CONVEX_HULLS: + ret = "MERGING_CONVEX_HULLS"; + break; + case Stages::FINALIZING_RESULTS: + ret = "FINALIZING_RESULTS"; + break; + case Stages::NUM_STAGES: + // Should be unreachable, here to silence enumeration value not handled in switch warnings + // GCC/Clang's -Wswitch + break; + } + 
return ret; +} + +void VHACDImpl::ProgressUpdate(Stages stage, + double stageProgress, + const char* operation) +{ + if ( m_params.m_callback ) + { + double overallProgress = (double(stage) * 100) / double(Stages::NUM_STAGES); + const char *s = GetStageName(stage); + m_params.m_callback->Update(overallProgress, + stageProgress, + s, + operation); + } +} + +bool VHACDImpl::IsCanceled() const +{ + return m_canceled; +} + +IVHACD* CreateVHACD(void) +{ + VHACDImpl *ret = new VHACDImpl; + return static_cast< IVHACD *>(ret); +} + +IVHACD* CreateVHACD(void); + +#if !VHACD_DISABLE_THREADING + +class LogMessage +{ +public: + double m_overallProgress{ double(-1.0) }; + double m_stageProgress{ double(-1.0) }; + std::string m_stage; + std::string m_operation; +}; + +class VHACDAsyncImpl : public VHACD::IVHACD, + public VHACD::IVHACD::IUserCallback, + VHACD::IVHACD::IUserLogger, + VHACD::IVHACD::IUserTaskRunner +{ +public: + VHACDAsyncImpl() = default; + + ~VHACDAsyncImpl() override; + + void Cancel() override final; + + bool Compute(const float* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t countTriangles, + const Parameters& params) override final; + + bool Compute(const double* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t countTriangles, + const Parameters& params) override final; + + bool GetConvexHull(const uint32_t index, + VHACD::IVHACD::ConvexHull& ch) const override final; + + uint32_t GetNConvexHulls() const override final; + + void Clean() override final; // release internally allocated memory + + void Release() override final; // release IVHACD + + // Will compute the center of mass of the convex hull decomposition results and return it + // in 'centerOfMass'. Returns false if the center of mass could not be computed. + bool ComputeCenterOfMass(double centerOfMass[3]) const override; + + bool IsReady() const override final; + + /** + * At the request of LegionFu : out_look@foxmail.com + * This method will return which convex hull is closest to the source position. + * You can use this method to figure out, for example, which vertices in the original + * source mesh are best associated with which convex hull. + * + * @param pos : The input 3d position to test against + * + * @return : Returns which convex hull this position is closest to. + */ + uint32_t findNearestConvexHull(const double pos[3], + double& distanceToHull) override final; + + void Update(const double overallProgress, + const double stageProgress, + const char* const stage, + const char *operation) override final; + + void Log(const char* const msg) override final; + + void* StartTask(std::function func) override; + + void JoinTask(void* Task) override; + + bool Compute(const Parameters params); + + bool ComputeNow(const std::vector& points, + const std::vector& triangles, + const Parameters& _desc); + + // As a convenience for the calling application we only send it update and log messages from it's own main + // thread. This reduces the complexity burden on the caller by making sure it only has to deal with log + // messages in it's main application thread. 
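+    // Update() and Log() below only append LogMessage entries under m_messageMutex when
+    // called from the background thread; the cached messages are then handed back on the
+    // caller's thread when a query such as GetNConvexHulls() or IsReady() invokes
+    // ProcessPendingMessages().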
+ void ProcessPendingMessages() const; + +private: + VHACD::VHACDImpl m_VHACD; + std::vector m_vertices; + std::vector m_indices; + VHACD::IVHACD::IUserCallback* m_callback{ nullptr }; + VHACD::IVHACD::IUserLogger* m_logger{ nullptr }; + VHACD::IVHACD::IUserTaskRunner* m_taskRunner{ nullptr }; + void* m_task{ nullptr }; + std::atomic m_running{ false }; + std::atomic m_cancel{ false }; + + // Thread safe caching mechanism for messages and update status. + // This is so that caller always gets messages in his own thread + // Member variables are marked as 'mutable' since the message dispatch function + // is called from const query methods. + mutable std::mutex m_messageMutex; + mutable std::vector m_messages; + mutable std::atomic m_haveMessages{ false }; +}; + +VHACDAsyncImpl::~VHACDAsyncImpl() +{ + Cancel(); +} + +void VHACDAsyncImpl::Cancel() +{ + m_cancel = true; + m_VHACD.Cancel(); + + if (m_task) + { + m_taskRunner->JoinTask(m_task); // Wait for the thread to fully exit before we delete the instance + m_task = nullptr; + } + m_cancel = false; // clear the cancel semaphore +} + +bool VHACDAsyncImpl::Compute(const float* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t countTriangles, + const Parameters& params) +{ + m_vertices.reserve(countPoints); + for (uint32_t i = 0; i < countPoints; ++i) + { + m_vertices.emplace_back(points[i * 3 + 0], + points[i * 3 + 1], + points[i * 3 + 2]); + } + + m_indices.reserve(countTriangles); + for (uint32_t i = 0; i < countTriangles; ++i) + { + m_indices.emplace_back(triangles[i * 3 + 0], + triangles[i * 3 + 1], + triangles[i * 3 + 2]); + } + + return Compute(params); +} + +bool VHACDAsyncImpl::Compute(const double* const points, + const uint32_t countPoints, + const uint32_t* const triangles, + const uint32_t countTriangles, + const Parameters& params) +{ + // We need to copy the input vertices and triangles into our own buffers so we can operate + // on them safely from the background thread. 
+ // Can't be local variables due to being asynchronous + m_vertices.reserve(countPoints); + for (uint32_t i = 0; i < countPoints; ++i) + { + m_vertices.emplace_back(points[i * 3 + 0], + points[i * 3 + 1], + points[i * 3 + 2]); + } + + m_indices.reserve(countTriangles); + for (uint32_t i = 0; i < countTriangles; ++i) + { + m_indices.emplace_back(triangles[i * 3 + 0], + triangles[i * 3 + 1], + triangles[i * 3 + 2]); + } + + return Compute(params); +} + +bool VHACDAsyncImpl::GetConvexHull(const uint32_t index, + VHACD::IVHACD::ConvexHull& ch) const +{ + return m_VHACD.GetConvexHull(index, + ch); +} + +uint32_t VHACDAsyncImpl::GetNConvexHulls() const +{ + ProcessPendingMessages(); + return m_VHACD.GetNConvexHulls(); +} + +void VHACDAsyncImpl::Clean() +{ + Cancel(); + m_VHACD.Clean(); +} + +void VHACDAsyncImpl::Release() +{ + delete this; +} + +bool VHACDAsyncImpl::ComputeCenterOfMass(double centerOfMass[3]) const +{ + bool ret = false; + + centerOfMass[0] = 0; + centerOfMass[1] = 0; + centerOfMass[2] = 0; + + if (IsReady()) + { + ret = m_VHACD.ComputeCenterOfMass(centerOfMass); + } + return ret; +} + +bool VHACDAsyncImpl::IsReady() const +{ + ProcessPendingMessages(); + return !m_running; +} + +uint32_t VHACDAsyncImpl::findNearestConvexHull(const double pos[3], + double& distanceToHull) +{ + uint32_t ret = 0; // The default return code is zero + + distanceToHull = 0; + // First, make sure that we have valid and completed results + if (IsReady() ) + { + ret = m_VHACD.findNearestConvexHull(pos,distanceToHull); + } + + return ret; +} + +void VHACDAsyncImpl::Update(const double overallProgress, + const double stageProgress, + const char* const stage, + const char* operation) +{ + m_messageMutex.lock(); + LogMessage m; + m.m_operation = std::string(operation); + m.m_overallProgress = overallProgress; + m.m_stageProgress = stageProgress; + m.m_stage = std::string(stage); + m_messages.push_back(m); + m_haveMessages = true; + m_messageMutex.unlock(); +} + +void VHACDAsyncImpl::Log(const char* const msg) +{ + m_messageMutex.lock(); + LogMessage m; + m.m_operation = std::string(msg); + m_haveMessages = true; + m_messages.push_back(m); + m_messageMutex.unlock(); +} + +void* VHACDAsyncImpl::StartTask(std::function func) +{ + return new std::thread(func); +} + +void VHACDAsyncImpl::JoinTask(void* Task) +{ + std::thread* t = static_cast(Task); + t->join(); + delete t; +} + +bool VHACDAsyncImpl::Compute(Parameters params) +{ + Cancel(); // if we previously had a solution running; cancel it. + + m_taskRunner = params.m_taskRunner ? params.m_taskRunner : this; + params.m_taskRunner = m_taskRunner; + + m_running = true; + m_task = m_taskRunner->StartTask([this, params]() { + ComputeNow(m_vertices, + m_indices, + params); + // If we have a user provided callback and the user did *not* call 'cancel' we notify him that the + // task is completed. However..if the user selected 'cancel' we do not send a completed notification event. + if (params.m_callback && !m_cancel) + { + params.m_callback->NotifyVHACDComplete(); + } + m_running = false; + }); + return true; +} + +bool VHACDAsyncImpl::ComputeNow(const std::vector& points, + const std::vector& triangles, + const Parameters& _desc) +{ + uint32_t ret = 0; + + Parameters desc; + m_callback = _desc.m_callback; + m_logger = _desc.m_logger; + + desc = _desc; + // Set our intercepting callback interfaces if non-null + desc.m_callback = _desc.m_callback ? this : nullptr; + desc.m_logger = _desc.m_logger ? 
this : nullptr;
+
+    // If not task runner provided, then use the default one
+    if (desc.m_taskRunner == nullptr)
+    {
+        desc.m_taskRunner = this;
+    }
+
+    bool ok = m_VHACD.Compute(points,
+                              triangles,
+                              desc);
+    if (ok)
+    {
+        ret = m_VHACD.GetNConvexHulls();
+    }
+
+    return ret ? true : false;
+}
+
+void VHACDAsyncImpl::ProcessPendingMessages() const
+{
+    if (m_cancel)
+    {
+        return;
+    }
+    if ( m_haveMessages )
+    {
+        m_messageMutex.lock();
+        for (auto& i : m_messages)
+        {
+            if ( i.m_overallProgress == -1 )
+            {
+                if ( m_logger )
+                {
+                    m_logger->Log(i.m_operation.c_str());
+                }
+            }
+            else if ( m_callback )
+            {
+                m_callback->Update(i.m_overallProgress,
+                                   i.m_stageProgress,
+                                   i.m_stage.c_str(),
+                                   i.m_operation.c_str());
+            }
+        }
+        m_messages.clear();
+        m_haveMessages = false;
+        m_messageMutex.unlock();
+    }
+}
+
+IVHACD* CreateVHACD_ASYNC()
+{
+    VHACDAsyncImpl* m = new VHACDAsyncImpl;
+    return static_cast<IVHACD*>(m);
+}
+#endif
+
+} // namespace VHACD
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif // __GNUC__
+
+#endif // ENABLE_VHACD_IMPLEMENTATION
+
+#endif // VHACD_H

From cd159c244fd9887ed2794a075ada6a8d6abe16fb Mon Sep 17 00:00:00 2001
From: Ian Chen
Date: Tue, 26 Mar 2024 14:00:52 -0700
Subject: [PATCH 37/37] Add new function in MeshManager to merge all submeshes
 of a mesh into one (#588)

---------

Signed-off-by: Ian Chen
---
 graphics/include/gz/common/MeshManager.hh |  8 +-
 graphics/src/ColladaLoader_TEST.cc        |  2 -
 graphics/src/MeshManager.cc               | 70 +++++++++++++++++
 graphics/src/MeshManager_TEST.cc          | 93 ++++++++++++++++++++++-
 4 files changed, 168 insertions(+), 5 deletions(-)

diff --git a/graphics/include/gz/common/MeshManager.hh b/graphics/include/gz/common/MeshManager.hh
index 61b19f3e3..4b7873f86 100644
--- a/graphics/include/gz/common/MeshManager.hh
+++ b/graphics/include/gz/common/MeshManager.hh
@@ -290,11 +290,17 @@ namespace gz
       /// \param[in] _voxelResolution Voxel resolution to use. Higher value
       /// produces more accurate shapes.
       /// \return A vector of decomposed submeshes.
-      public: std::vector<SubMesh> ConvexDecomposition(
+      public: static std::vector<SubMesh> ConvexDecomposition(
           const common::SubMesh &_subMesh,
           std::size_t _maxConvexHulls = 16u,
           std::size_t _voxelResolution = 200000u);
 
+      /// \brief Merge all submeshes from one mesh into one single submesh.
+      /// \param[in] _mesh Input mesh with submeshes to merge.
+      /// \return A new mesh with the submeshes merged.
+      public: static std::unique_ptr<Mesh> MergeSubMeshes(
+          const common::Mesh &_mesh);
+
       /// \brief Converts a vector of polylines into a table of vertices and
       /// a list of edges (each made of 2 points from the table of vertices.
      /// \param[in] _polys the polylines
diff --git a/graphics/src/ColladaLoader_TEST.cc b/graphics/src/ColladaLoader_TEST.cc
index e8ea61f80..8bd159b7f 100644
--- a/graphics/src/ColladaLoader_TEST.cc
+++ b/graphics/src/ColladaLoader_TEST.cc
@@ -242,8 +242,6 @@ TEST_F(ColladaLoader, TexCoordSets)
   EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoordBySet(0u, 0u));
   EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoordBySet(1u, 0u));
   EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoordBySet(2u, 0u));
-  EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoordBySet(1u, 0u));
-  EXPECT_EQ(math::Vector2d(0, 1), subMeshB->TexCoordBySet(2u, 0u));
 
   EXPECT_TRUE(subMeshB->HasTexCoordBySet(0u, 0u));
   EXPECT_TRUE(subMeshB->HasTexCoordBySet(1u, 0u));
diff --git a/graphics/src/MeshManager.cc b/graphics/src/MeshManager.cc
index f1c42ea41..2c62d393b 100644
--- a/graphics/src/MeshManager.cc
+++ b/graphics/src/MeshManager.cc
@@ -19,6 +19,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1760,3 +1761,72 @@ MeshManager::ConvexDecomposition(const SubMesh &_subMesh,
 
   return decomposed;
 }
+
+//////////////////////////////////////////////////
+std::unique_ptr<Mesh> MeshManager::MergeSubMeshes(const Mesh &_mesh)
+{
+  SubMesh mergedSubMesh;
+
+  // The final merged submesh should contain all the texcoord sets
+  // in the original submeshes. Determine the max texcoord sets we need.
+  unsigned int maxTexCoordSet = 0u;
+  for (unsigned int i = 0u; i < _mesh.SubMeshCount(); ++i)
+  {
+    auto submesh = _mesh.SubMeshByIndex(i).lock();
+    maxTexCoordSet = (maxTexCoordSet > submesh->TexCoordSetCount()) ?
+        maxTexCoordSet : submesh->TexCoordSetCount();
+  }
+
+  unsigned int indexOffset = 0u;
+  for (unsigned int i = 0u; i < _mesh.SubMeshCount(); ++i)
+  {
+    auto submesh = _mesh.SubMeshByIndex(i).lock();
+    // vertices
+    for (unsigned int j = 0; j < submesh->VertexCount(); ++j)
+    {
+      mergedSubMesh.AddVertex(submesh->Vertex(j));
+    }
+
+    // normals
+    for (unsigned int j = 0; j < submesh->NormalCount(); ++j)
+    {
+      mergedSubMesh.AddNormal(submesh->Normal(j));
+    }
+
+    // indices - the index needs to start at an offset for each new submesh
+    for (unsigned int j = 0; j < submesh->IndexCount(); ++j)
+    {
+      mergedSubMesh.AddIndex(submesh->Index(j) + indexOffset);
+    }
+    indexOffset += submesh->VertexCount();
+
+    // texcoords
+    for (unsigned int j = 0; j < maxTexCoordSet; ++j)
+    {
+      if (j < submesh->TexCoordSetCount())
+      {
+        // Populate texcoords from input submesh
+        for (unsigned int k = 0; k < submesh->TexCoordCountBySet(j); ++k)
+        {
+          mergedSubMesh.AddTexCoordBySet(submesh->TexCoordBySet(k, j), j);
+        }
+      }
+      else
+      {
+        // Set texcoord to zero if the input submesh does not have that many
+        // texcoord sets. Note the texcoord count should be the same as vertex
+        // count.
+        for (unsigned int k = 0; k < submesh->VertexCount(); ++k)
+        {
+          mergedSubMesh.AddTexCoordBySet(math::Vector2d::Zero, j);
+        }
+      }
+    }
+  }
+
+  auto mesh = std::make_unique<Mesh>();
+  mesh->SetName(_mesh.Name() + "_merged");
+  mergedSubMesh.SetName(mesh->Name() + "_submesh");
+  mesh->AddSubMesh(mergedSubMesh);
+
+  return mesh;
+}
diff --git a/graphics/src/MeshManager_TEST.cc b/graphics/src/MeshManager_TEST.cc
index 568e06611..dd7f8acb6 100644
--- a/graphics/src/MeshManager_TEST.cc
+++ b/graphics/src/MeshManager_TEST.cc
@@ -304,7 +304,7 @@ TEST_F(MeshManager, ConvexDecomposition)
   std::size_t maxConvexHulls = 4;
   std::size_t resolution = 1000;
   auto submesh = boxMesh->SubMeshByIndex(0u).lock();
-  auto decomposed = std::move(mgr->ConvexDecomposition(
+  auto decomposed = std::move(common::MeshManager::ConvexDecomposition(
     *(submesh.get()), maxConvexHulls, resolution));
 
   // Decomposing a box should just produce a box
@@ -321,7 +321,7 @@ TEST_F(MeshManager, ConvexDecomposition)
   ASSERT_NE(nullptr, drillMesh);
   EXPECT_EQ(1u, drillMesh->SubMeshCount());
   submesh = drillMesh->SubMeshByIndex(0u).lock();
-  decomposed = std::move(mgr->ConvexDecomposition(
+  decomposed = std::move(common::MeshManager::ConvexDecomposition(
     *(submesh.get()), maxConvexHulls, resolution));
 
   // A drill should be decomposed into multiple submeshes
@@ -337,4 +337,93 @@ TEST_F(MeshManager, ConvexDecomposition)
   }
 }
 
+/////////////////////////////////////////////////
+TEST_F(MeshManager, MergeSubMeshes)
+{
+  auto mgr = common::MeshManager::Instance();
+  const common::Mesh *mesh = mgr->Load(
+      common::testing::TestFile("data",
+      "multiple_texture_coordinates_triangle.dae"));
+  ASSERT_NE(nullptr, mesh);
+  EXPECT_EQ(2u, mesh->SubMeshCount());
+  auto submesh = mesh->SubMeshByIndex(0u).lock();
+  ASSERT_NE(nullptr, submesh);
+  EXPECT_EQ(3u, submesh->VertexCount());
+  EXPECT_EQ(3u, submesh->NormalCount());
+  EXPECT_EQ(3u, submesh->IndexCount());
+  EXPECT_EQ(2u, submesh->TexCoordSetCount());
+  EXPECT_EQ(3u, submesh->TexCoordCountBySet(0));
+  EXPECT_EQ(3u, submesh->TexCoordCountBySet(1));
+  auto submeshB = mesh->SubMeshByIndex(1u).lock();
+  ASSERT_NE(nullptr, submeshB);
+  EXPECT_EQ(3u, submeshB->VertexCount());
+  EXPECT_EQ(3u, submeshB->NormalCount());
+  EXPECT_EQ(3u, submeshB->IndexCount());
+  EXPECT_EQ(3u, submeshB->TexCoordSetCount());
+  EXPECT_EQ(3u, submeshB->TexCoordCountBySet(0));
+  EXPECT_EQ(3u, submeshB->TexCoordCountBySet(1));
+  EXPECT_EQ(3u, submeshB->TexCoordCountBySet(2));
+
+  // merge all submeshes into one
+  auto merged = common::MeshManager::MergeSubMeshes(*mesh);
+  ASSERT_NE(nullptr, merged);
+  EXPECT_FALSE(merged->Name().empty());
+  EXPECT_EQ(1u, merged->SubMeshCount());
+  auto mergedSubmesh = merged->SubMeshByIndex(0u).lock();
+  ASSERT_NE(nullptr, mergedSubmesh);
+  EXPECT_FALSE(mergedSubmesh->Name().empty());
+
+  // Verify vertex, normal, index, and texcoord values in the
+  // final merged submesh
+  EXPECT_EQ(6u, mergedSubmesh->VertexCount());
+  EXPECT_EQ(6u, mergedSubmesh->NormalCount());
+  EXPECT_EQ(6u, mergedSubmesh->IndexCount());
+  EXPECT_EQ(3u, mergedSubmesh->TexCoordSetCount());
+  EXPECT_EQ(6u, mergedSubmesh->TexCoordCountBySet(0));
+  EXPECT_EQ(6u, mergedSubmesh->TexCoordCountBySet(1));
+  EXPECT_EQ(6u, mergedSubmesh->TexCoordCountBySet(2));
+
+  EXPECT_EQ(math::Vector3d(0, 0, 0), mergedSubmesh->Vertex(0u));
+  EXPECT_EQ(math::Vector3d(10, 0, 0), mergedSubmesh->Vertex(1u));
+  EXPECT_EQ(math::Vector3d(10, 10, 0), mergedSubmesh->Vertex(2u));
+  EXPECT_EQ(math::Vector3d(10, 0, 0), mergedSubmesh->Vertex(3u));
+
EXPECT_EQ(math::Vector3d(20, 0, 0), mergedSubmesh->Vertex(4u)); + EXPECT_EQ(math::Vector3d(20, 10, 0), mergedSubmesh->Vertex(5u)); + + EXPECT_EQ(math::Vector3d(0, 0, 1), mergedSubmesh->Normal(0u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), mergedSubmesh->Normal(1u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), mergedSubmesh->Normal(2u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), mergedSubmesh->Normal(3u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), mergedSubmesh->Normal(4u)); + EXPECT_EQ(math::Vector3d(0, 0, 1), mergedSubmesh->Normal(5u)); + + EXPECT_EQ(0u, mergedSubmesh->Index(0u)); + EXPECT_EQ(1u, mergedSubmesh->Index(1u)); + EXPECT_EQ(2u, mergedSubmesh->Index(2u)); + EXPECT_EQ(3u, mergedSubmesh->Index(3u)); + EXPECT_EQ(4u, mergedSubmesh->Index(4u)); + EXPECT_EQ(5u, mergedSubmesh->Index(5u)); + + EXPECT_EQ(math::Vector2d(0, 1), mergedSubmesh->TexCoordBySet(0u, 0u)); + EXPECT_EQ(math::Vector2d(0, 1), mergedSubmesh->TexCoordBySet(1u, 0u)); + EXPECT_EQ(math::Vector2d(0, 1), mergedSubmesh->TexCoordBySet(2u, 0u)); + EXPECT_EQ(math::Vector2d(0, 1), mergedSubmesh->TexCoordBySet(3u, 0u)); + EXPECT_EQ(math::Vector2d(0, 1), mergedSubmesh->TexCoordBySet(4u, 0u)); + EXPECT_EQ(math::Vector2d(0, 1), mergedSubmesh->TexCoordBySet(5u, 0u)); + + EXPECT_EQ(math::Vector2d(0, 1), mergedSubmesh->TexCoordBySet(0u, 1u)); + EXPECT_EQ(math::Vector2d(0, 1), mergedSubmesh->TexCoordBySet(1u, 1u)); + EXPECT_EQ(math::Vector2d(0, 1), mergedSubmesh->TexCoordBySet(2u, 1u)); + EXPECT_EQ(math::Vector2d(0, 0.5), mergedSubmesh->TexCoordBySet(3u, 1u)); + EXPECT_EQ(math::Vector2d(0, 0.4), mergedSubmesh->TexCoordBySet(4u, 1u)); + EXPECT_EQ(math::Vector2d(0, 0.3), mergedSubmesh->TexCoordBySet(5u, 1u)); + + EXPECT_EQ(math::Vector2d(0, 0), mergedSubmesh->TexCoordBySet(0u, 2u)); + EXPECT_EQ(math::Vector2d(0, 0), mergedSubmesh->TexCoordBySet(1u, 2u)); + EXPECT_EQ(math::Vector2d(0, 0), mergedSubmesh->TexCoordBySet(2u, 2u)); + EXPECT_EQ(math::Vector2d(0, 0.8), mergedSubmesh->TexCoordBySet(3u, 2u)); + EXPECT_EQ(math::Vector2d(0, 0.7), mergedSubmesh->TexCoordBySet(4u, 2u)); + EXPECT_EQ(math::Vector2d(0, 0.6), mergedSubmesh->TexCoordBySet(5u, 2u)); +} + #endif
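
For readers who want to try the new API outside the test suite, the sketch below shows one way a downstream application might call MergeSubMeshes() and the now-static ConvexDecomposition() introduced by the patch above. It is illustrative only and not part of the patch series: the include list, the main() wrapper, and the "model.dae" path are assumptions, and real code would add its own error handling.

#include <memory>
#include <vector>

#include <gz/common/Mesh.hh>
#include <gz/common/SubMesh.hh>
#include <gz/common/MeshManager.hh>

int main()
{
  // Load a mesh through the manager singleton; the path is hypothetical.
  auto *mgr = gz::common::MeshManager::Instance();
  const gz::common::Mesh *mesh = mgr->Load("model.dae");
  if (!mesh)
    return 1;

  // Collapse every submesh into a single submesh (new in PR #588).
  std::unique_ptr<gz::common::Mesh> merged =
      gz::common::MeshManager::MergeSubMeshes(*mesh);
  if (!merged || merged->SubMeshCount() == 0u)
    return 1;

  // ConvexDecomposition() is now static, so no manager instance is needed;
  // 16 hulls and a 200000 voxel resolution match the declaration's defaults.
  auto submesh = merged->SubMeshByIndex(0u).lock();
  std::vector<gz::common::SubMesh> hulls =
      gz::common::MeshManager::ConvexDecomposition(*submesh, 16u, 200000u);

  return hulls.empty() ? 1 : 0;
}

Note that MergeSubMeshes() hands ownership of the merged mesh back to the caller instead of registering it with the manager, which is why the result is held in a std::unique_ptr here.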