Skip to content

Commit

Permalink
Add vcpkg manifest
Browse files (browse the repository at this point in the history)
  • Loading branch information
bernhardmgruber committed Aug 17, 2023
1 parent bc2ecc3 commit 5ebb87f
Show file tree
Hide file tree
Showing 2 changed files with 24 additions and 22 deletions.
24 changes: 2 additions & 22 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ concurrency:
env:
THREADS: 2
CONFIG: RelWithDebInfo
VCPKG_INSTALL: "vcpkg --version; vcpkg install alpaka fmt tinyobjloader boost-mp11 boost-atomic boost-smart-ptr boost-functional boost-container boost-iostreams[core] catch2 xsimd"

jobs:
clang-format:
Expand Down Expand Up @@ -56,9 +55,6 @@ jobs:
- name: install clang-16
run: |
sudo apt install clang-16 libomp-16-dev clang-tidy-16
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- name: cmake
run: |
mkdir build
Expand Down Expand Up @@ -90,9 +86,6 @@ jobs:
- name: install lcov
run: |
sudo apt install lcov
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- name: cmake
run: |
mkdir build
Expand Down Expand Up @@ -129,16 +122,14 @@ jobs:
- uses: actions/checkout@v3
- name: create llama.hpp
run: ./tools/create-single-header.sh
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- run: vcpkg install # installs manifest in a folder called `vcpkg_installed` next to the manifest
- name: test llama.hpp
run: |
mkdir build
cd build
mkdir llama
cp -p ../single-header/llama.hpp llama
$CXX -std=c++20 -I$VCPKG_INSTALLATION_ROOT/installed/x64-linux/include -I. ../examples/heatequation/heatequation.cpp
$CXX -std=c++20 -I../vcpkg_installed/x64-linux/include -I. ../examples/heatequation/heatequation.cpp
- name: upload llama.hpp
uses: actions/upload-artifact@v3
with:
Expand Down Expand Up @@ -272,11 +263,6 @@ jobs:
if: ${{ matrix.install_extra }}
run: |
sudo apt install ${{ matrix.install_extra }}
- name: vcpkg install dependencies
run: |
# vcpkg fails to build with Intel or nvhpc compilers
if [ ${{ matrix.add_oneapi_repo }} ] || [ ${{ matrix.add_nvcpp_repo }} ]; then unset CXX; fi
eval $VCPKG_INSTALL
- name: download CUDA
if: matrix.cuda_url
run: |
Expand Down Expand Up @@ -346,9 +332,6 @@ jobs:

steps:
- uses: actions/checkout@v3
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- name: cmake
run: |
mkdir build
Expand Down Expand Up @@ -379,9 +362,6 @@ jobs:
run: |
brew install llvm libomp pkg-config
echo "CXX is here: $(brew --prefix llvm)/bin/clang++"
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- name: cmake
run: |
mkdir build
Expand Down
22 changes: 22 additions & 0 deletions vcpkg.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
{
  "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg/master/scripts/vcpkg.schema.json",
  "name": "llama",
  "version": "0.5.0",
  "dependencies": [
    "alpaka",
    "boost-atomic",
    "boost-container",
    "boost-core",
    "boost-functional",
    {
      "name": "boost-iostreams",
      "default-features": false
    },
    "boost-mp11",
    "boost-smart-ptr",
    "catch2",
    "fmt",
    "tinyobjloader",
    "xsimd"
  ]
}

0 comments on commit 5ebb87f

Please sign in to comment.