# ci.yml
name: CI

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
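
# With this concurrency group, a new push to the same branch or PR cancels any run of
# this workflow that is still in progress for that ref, so only the latest commit is checked.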

on:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main
  release:
    types: [published]
  schedule:
    - cron: '37 11 * * 1,2,3,4,5'  # early morning (11:37 UTC / 4:37 AM PDT), Monday through Friday

env:
  # NOTE: Need to update `TORCH_VERSION` and `TORCH_*_INSTALL` for new torch releases.
  TORCH_VERSION: 1.13.1
  # TORCH_CPU_INSTALL: conda install pytorch torchvision torchaudio cpuonly -c pytorch
  TORCH_CPU_INSTALL: pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu -c constraints.txt
  # Change this to invalidate the existing cache.
  CACHE_PREFIX: v11
  # Disable tokenizers parallelism because it doesn't help and can cause issues in distributed tests.
  TOKENIZERS_PARALLELISM: 'false'
  # Disable multithreading with OMP because it can lead to deadlocks in distributed tests.
  OMP_NUM_THREADS: '1'
  # See https://github.com/pytorch/pytorch/issues/37377#issuecomment-677851112.
  MKL_THREADING_LAYER: 'GNU'
  DEFAULT_PYTHON_VERSION: 3.8
  # For debugging GPU tests.
  CUDA_LAUNCH_BLOCKING: '1'

defaults:
  run:
    shell: bash -l {0}
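
# `bash -l {0}` runs every step in a login shell, presumably so that the conda environment
# initialized by setup-miniconda below is available in each step.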

jobs:
  changelog:
    name: CHANGELOG
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request'
    steps:
      - uses: actions/checkout@v3

      - name: Check if source files have changed
        run: |
          git diff --name-only $(git merge-base origin/main HEAD) | grep '^allennlp/.*\.py$' && echo "source_files_changed=true" >> $GITHUB_ENV || echo "source_files_changed=false" >> $GITHUB_ENV

      - name: Check that CHANGELOG has been updated
        if: env.source_files_changed == 'true'
        run: |
          # If this step fails, it means you haven't updated the CHANGELOG.md
          # file with notes on your contribution.
          git diff --name-only $(git merge-base origin/main HEAD) | grep '^CHANGELOG.md$' && echo "Thanks for helping keep our CHANGELOG up-to-date!"
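          # (grep exits non-zero when CHANGELOG.md is absent from the diff, which is
          # what makes this step, and therefore the job, fail.)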

  style:
    name: Style
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.DEFAULT_PYTHON_VERSION }}

      - name: Install requirements
        run: |
          grep -E '^black' dev-requirements.txt | xargs pip install

      - name: Debug info
        run: |
          pip freeze

      - name: Run black
        run: |
          black --check .
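
  # Only black itself is installed for the style job (grepped out of dev-requirements.txt);
  # flake8 and type checking run in the Lint task of the `checks` job below.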

  checks:
    name: ${{ matrix.task.name }}
    runs-on: ${{ matrix.task.runs_on }}
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        task:
          - name: Lint
            runs_on: ubuntu-latest
            coverage_report: false
            torch_platform: cpu
            run: |
              make flake8
              make typecheck

          - name: CPU Tests
            runs_on: ubuntu-latest
            coverage_report: true
            torch_platform: cpu
            run: make test

          - name: Model Tests
            runs_on: ubuntu-latest
            coverage_report: true
            torch_platform: cpu
            run: |
              cd allennlp-models
              make test-with-cov COV=allennlp
              mv coverage.xml ../
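
    # Each task above supplies its own `run` command; the shared steps below build the
    # environment and then execute that command in the step named after the task.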
    steps:
      - uses: actions/checkout@v3

      - uses: conda-incubator/setup-miniconda@v2
        with:
          miniconda-version: "latest"
          python-version: ${{ env.DEFAULT_PYTHON_VERSION }}

      - name: Set build variables
        run: |
          # Get the exact Python version to use in the cache key.
          echo "PYTHON_VERSION=$(python --version)" >> $GITHUB_ENV
          echo "RUNNER_ARCH=$(uname -m)" >> $GITHUB_ENV
          # Use week number in cache key so we can refresh the cache weekly.
          echo "WEEK_NUMBER=$(date +%V)" >> $GITHUB_ENV

      - name: Set build variables (CPU only)
        if: matrix.task.torch_platform == 'cpu'
        run: |
          echo "TORCH_INSTALL=$TORCH_CPU_INSTALL" >> $GITHUB_ENV

      - uses: actions/cache@v3
        id: virtualenv-cache
        with:
          path: .venv
          key: ${{ env.CACHE_PREFIX }}-${{ env.WEEK_NUMBER }}-${{ runner.os }}-${{ env.RUNNER_ARCH }}-${{ env.PYTHON_VERSION }}-${{ matrix.task.torch_platform }}-${{ hashFiles('setup.py') }}-${{ hashFiles('*requirements.txt', 'constraints.txt') }}
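
      # The cache key changes when CACHE_PREFIX is bumped (manual invalidation), weekly via
      # WEEK_NUMBER, and whenever setup.py or any requirements/constraints file changes.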
      - name: Setup virtual environment (no cache hit)
        if: steps.virtualenv-cache.outputs.cache-hit != 'true'
        run: |
          python${{ env.DEFAULT_PYTHON_VERSION }} -m venv .venv
          source .venv/bin/activate
          make install TORCH_INSTALL="$TORCH_INSTALL"

      - name: Setup virtual environment (cache hit)
        if: steps.virtualenv-cache.outputs.cache-hit == 'true'
        run: |
          source .venv/bin/activate
          pip install --no-deps -e .[all]
          make download-extras
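
      # NOTE: re-installing the editable package even on a cache hit appears to be needed
      # because the "Clean up" step below uninstalls allennlp before the venv cache is
      # saved (see the pip issue linked there).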
      - name: Pull and install models repo
        if: matrix.task.name == 'Model Tests'
        env:
          ALLENNLP_VERSION_OVERRIDE: ""  # Don't replace the core library.
        run: |
          source .venv/bin/activate
          git clone https://github.com/allenai/allennlp-models.git
          cd allennlp-models
          # git checkout dependabot/pip/torch-gte-1.7.0-and-lt-1.13.0
          pip install -e .[dev,all]

      - name: Debug info
        run: |
          source .venv/bin/activate
          pip freeze

      - name: Ensure torch up-to-date
        run: |
          source .venv/bin/activate
          python scripts/check_torch_version.py
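
      # check_torch_version.py presumably verifies that the installed torch matches the
      # TORCH_VERSION declared at the top of this workflow.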
      - name: ${{ matrix.task.name }}
        run: |
          source .venv/bin/activate
          ${{ matrix.task.run }}

      - name: Prepare coverage report
        if: matrix.task.coverage_report
        run: |
          mkdir coverage
          mv coverage.xml coverage/

      - name: Save coverage report
        if: matrix.task.coverage_report
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.task.name }}-coverage
          path: ./coverage

      - name: Clean up
        if: always()
        run: |
          # Could run into issues with the cache if we don't uninstall the editable.
          # See https://github.com/pypa/pip/issues/4537.
          source .venv/bin/activate
          pip uninstall --yes allennlp allennlp-models

  upload_coverage:
    name: Upload Coverage Report
    timeout-minutes: 5
    if: github.repository == 'EducationalTestingService/allennlp' && (github.event_name == 'push' || github.event_name == 'pull_request')
    runs-on: ubuntu-latest
    needs: [checks]
    steps:
      # Need to checkout code to get the coverage config.
      - uses: actions/checkout@v3

      - name: Download coverage report from CPU tests
        uses: actions/download-artifact@v3
        with:
          name: CPU Tests-coverage
          path: coverage/cpu_tests

      - name: Download coverage report from model tests
        uses: actions/download-artifact@v3
        with:
          name: Model Tests-coverage
          path: coverage/model_tests
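
      # The artifact names above ("CPU Tests-coverage", "Model Tests-coverage") must match
      # what the `checks` job uploads, i.e. `${{ matrix.task.name }}-coverage`.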
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v2
        with:
          files: coverage/cpu_tests/coverage.xml,coverage/model_tests/coverage.xml
          # Ignore codecov failures because the codecov server is not very reliable, and we
          # don't want to report a failure in the GitHub UI just because the coverage
          # report failed to be published.
          fail_ci_if_error: false
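
# To reproduce the main checks locally (assuming the repository's Makefile defines the
# targets used above), set up an environment with `make install` and then run
# `make flake8`, `make typecheck`, and `make test`.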