CI #580
name: CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
  schedule:
    # Run every Sunday at midnight
    - cron: '0 0 * * 0'
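
# Run every step in a login shell so that the conda initialization set up by
# setup-miniconda is sourced and `conda activate` works inside run steps.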
defaults:
  run:
    shell: bash -l {0}

jobs:
  build:
    name: ${{ matrix.name }}
    runs-on: ubuntu-22.04
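    # fail-fast is disabled so one failing configuration does not cancel the
    # remaining matrix entries.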
    strategy:
      fail-fast: false
      matrix:
        include:
          # Oldest supported versions
          - name: Linux (CUDA 10.2, Python 3.8, PyTorch 1.11)
            enable_cuda: true
            cuda: "10.2.89"
            gcc: "8.5.*"
            nvcc: "10.2"
            python: "3.8.*"
            torchani: "2.2.*"
            pytorch: "1.11.*"
          # Older supported versions
          - name: Linux (CUDA 11.2, Python 3.9, PyTorch 1.12)
            enable_cuda: true
            cuda: "11.2.2"
            gcc: "10.3.*"
            nvcc: "11.2"
            python: "3.9.*"
            torchani: "2.2.*"
            pytorch: "1.12.*"
          # Latest supported versions (with CUDA)
          - name: Linux (CUDA 11.8, Python 3.10, PyTorch 2.0)
            enable_cuda: true
            cuda: "11.8.0"
            gcc: "10.3.*"
            nvcc: "11.8"
            python: "3.10.*"
            torchani: "2.2.*"
            pytorch: "2.0.*"
          # Latest supported versions (without CUDA)
          - name: Linux (no CUDA, Python 3.10, PyTorch 2.0)
            enable_cuda: false
            gcc: "10.3.*"
            python: "3.10.*"
            pytorch: "2.0.*"
            torchani: "2.2.*"
    steps:
      - name: Check out
        uses: actions/checkout@v2
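      # Install the CUDA version requested by the matrix. The linux-local-args
      # are forwarded to NVIDIA's runfile installer: --toolkit installs only
      # the toolkit (no driver) and --override bypasses the installer's
      # compiler checks.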
      - name: Install CUDA Toolkit
        uses: Jimver/[email protected]
        with:
          cuda: ${{ matrix.cuda }}
          linux-local-args: '["--toolkit", "--override"]'
        if: ${{ matrix.enable_cuda }}
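      # The Mambaforge variant ships mamba in the base environment, which is
      # used below to create the build environment from environment.yml.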
      - name: Install Miniconda
        uses: conda-incubator/setup-miniconda@v2
        with:
          activate-environment: ""
          auto-activate-base: true
          miniforge-variant: Mambaforge
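      # Pin the matrix versions in environment.yml: each sed "/pattern/c\ ..."
      # expression replaces the whole matching line with the pinned spec, so a
      # dependency line such as "- pytorch-gpu" would become
      # "- pytorch-gpu 2.0.*" (illustrative; the exact lines live in
      # environment.yml).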
      - name: Prepare dependencies (with CUDA)
        if: ${{ matrix.enable_cuda }}
        run: |
          sed -i -e "/cudatoolkit/c\ - cudatoolkit ${{ matrix.cuda }}" \
                 -e "/gxx_linux-64/c\ - gxx_linux-64 ${{ matrix.gcc }}" \
                 -e "/torchani/c\ - torchani ${{ matrix.torchani }}" \
                 -e "/nvcc_linux-64/c\ - nvcc_linux-64 ${{ matrix.nvcc }}" \
                 -e "/python/c\ - python ${{ matrix.python }}" \
                 -e "/pytorch-gpu/c\ - pytorch-gpu ${{ matrix.pytorch }}" \
                 environment.yml
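      # CPU-only variant: comment out the CUDA-specific dependencies and swap
      # pytorch-gpu for pytorch-cpu.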
      - name: Prepare dependencies (without CUDA)
        if: ${{ !matrix.enable_cuda }}
        run: |
          sed -i -e "/cudatoolkit/c\ # - cudatoolkit" \
                 -e "/gxx_linux-64/c\ - gxx_linux-64 ${{ matrix.gcc }}" \
                 -e "/torchani/c\ - torchani ${{ matrix.torchani }}" \
                 -e "/nvcc_linux-64/c\ # - nvcc_linux-64" \
                 -e "/python/c\ - python ${{ matrix.python }}" \
                 -e "/pytorch-gpu/c\ - pytorch-cpu ${{ matrix.pytorch }}" \
                 environment.yml
      - name: Show dependency file
        run: cat environment.yml
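      # CONDA_OVERRIDE_CUDA makes conda report the __cuda virtual package at
      # the given version, so the solver accepts CUDA-enabled packages even
      # though the runner has no GPU.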
      - name: Install dependencies
        run: mamba env create -n nnpops -f environment.yml
        env:
          # Needed to install pytorch-gpu on a machine without a GPU
          CONDA_OVERRIDE_CUDA: ${{ matrix.nvcc }}
      - name: List conda environment
        run: |
          conda activate nnpops
          conda list
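      # Torch_DIR points CMake at the TorchConfig.cmake shipped with the
      # installed PyTorch package, so find_package(Torch) resolves to the
      # conda environment rather than a system copy.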
      - name: Configure, compile, and install
        run: |
          conda activate nnpops
          mkdir build && cd build
          cmake .. \
              -DENABLE_CUDA=${{ matrix.enable_cuda }} \
              -DTorch_DIR=$(python -c 'import torch.utils; print(torch.utils.cmake_prefix_path)')/Torch \
              -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
          make install
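      # The hosted runner has no GPU, so the CUDA tests are excluded from ctest.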
      - name: Test
        run: |
          conda activate nnpops
          cd build
          ctest --verbose --exclude-regex TestCuda