forked from alibaba/GraphScope
-
Notifications
You must be signed in to change notification settings - Fork 1
323 lines (274 loc) · 11.7 KB
/
gss.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
---
name: GraphScope Store CI

on:
  # Trigger the workflow on push or pull request,
  # but only for the main branch
  push:
    branches:
      - main
    # Skip CI for pushes that only touch docs/metadata.
    paths-ignore:
      - 'CONTRIBUTORS'
      - 'LICENSE'
      - 'NOTICE.txt'
      - '**.md'
      - '**.rst'
      - 'docs/**'
      - 'demo/**'
      - 'scripts/**'
      - 'tutorials/**'
  pull_request:
    branches:
      - main
    # Only run for PRs touching the store/interactive-engine surface
    # (negated patterns exclude markdown-only changes within them).
    paths:
      - 'proto/**'
      - 'interactive_engine/**'
      - 'python/graphscope/client/**'
      - 'charts/graphscope-store/**'
      - '.github/workflows/gss.yml'
      - '!interactive_engine/**.md'
      - '!charts/graphscope-store/**.md'

# Cancel any in-flight run of this workflow for the same PR / branch / commit.
concurrency:
  group: ${{ github.repository }}-${{ github.event.number || github.head_ref || github.sha }}-${{ github.workflow }}
  cancel-in-progress: true

env:
  # Image repository used to tag and load the store image built in helm-test.
  GSS_IMAGE: registry.cn-hongkong.aliyuncs.com/graphscope/graphscope-store
jobs:
  gremlin-test:
    # Require the host is able to run docker without sudo and
    # can `ssh localhost` without password, which may need to
    # be configured manually when a new self-hosted runner is added.
    runs-on: [self-hosted, manylinux2014]
    if: ${{ github.repository == 'alibaba/GraphScope' }}
    steps:
      - uses: actions/checkout@v3

      # Self-hosted runners must never host an interactive tmate debug
      # session; fail fast if one is referenced in this workflow file.
      - name: Detect the tmate session
        run: |
          if grep -v "grep" .github/workflows/gss.yml | grep "action-tmate"; then
            echo 'WARNING!!!the self-hosted machine can not run tmate session, please debug it manually'
            exit 1
          fi

      # Cache the local Maven repository, keyed on the pom.xml contents.
      - uses: actions/cache@v3
        with:
          path: ~/.m2/repository
          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
          restore-keys: |
            ${{ runner.os }}-maven-

      # Cache Cargo registries and the sccache compilation cache,
      # keyed on Cargo.lock so Rust rebuilds are incremental across runs.
      - uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            ~/.cache/sccache
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}

      # First build/test pass: groot with the column_filter_push_down
      # feature enabled; rustc is wrapped with sccache to reuse the cache.
      - name: Build GraphScope Store With Column Filter Push Down
        run: |
          . ${HOME}/.graphscope_env
          export SCCACHE_DIR=~/.cache/sccache
          export RUSTC_WRAPPER=/usr/local/bin/sccache
          sccache --start-server
          cd ${GITHUB_WORKSPACE}/interactive_engine
          mvn clean install -P groot -Drust.compile.mode=debug -DskipTests -Dgroot.compile.feature="column_filter_push_down" --quiet
          sccache --show-stats
      - name: Gremlin Tests with Column Filter Push Down
        run: |
          . ${HOME}/.graphscope_env
          cd interactive_engine/groot-server
          mvn test -P gremlin-test

      # Second build/test pass: the default groot build (feature disabled),
      # to ensure both configurations pass the gremlin test suite.
      - name: Build GraphScope Store
        run: |
          . ${HOME}/.graphscope_env
          export SCCACHE_DIR=~/.cache/sccache
          export RUSTC_WRAPPER=/usr/local/bin/sccache
          cd ${GITHUB_WORKSPACE}/interactive_engine
          mvn clean install -P groot -Drust.compile.mode=debug -DskipTests --quiet
          sccache --show-stats
      - name: Gremlin Test
        run: |
          . ${HOME}/.graphscope_env
          cd interactive_engine/groot-server
          mvn test -P gremlin-test

      # Hand the built artifacts off to the downstream helm-test job.
      - name: Upload tools for helm test to Artifact
        uses: actions/upload-artifact@v3
        with:
          name: groot
          path: |
            interactive_engine/assembly/target/groot.tar.gz
            interactive_engine/data-load-tool/target/data-load-tool-0.0.1-SNAPSHOT.jar
          retention-days: 5
helm-test:
runs-on: [self-hosted, ubuntu2004]
if: ${{ github.repository == 'alibaba/GraphScope' }}
needs: [gremlin-test]
env:
JAVA_HOME: /usr/lib/jvm/default-java
GS_TEST_DIR: ${{ github.workspace }}/gstest
steps:
- uses: actions/checkout@v3
with:
submodules: true
- name: Detect the tmate session
run: |
if grep -v "grep" .github/workflows/gss.yml | grep "action-tmate"; then
echo 'WARNING!!!the self-hosted machine can not run tmate session, please debug it manually'
exit 1
fi
- uses: actions/download-artifact@v3
with:
name: groot
path: artifacts
- name: Set GITHUB_ENV
run: |
short_sha=$(git rev-parse --short HEAD)
echo "SHORT_SHA=${short_sha}" >> $GITHUB_ENV
- name: Prepare Image
run: |
ls -la artifacts/*/*
mv artifacts/assembly/target/groot.tar.gz artifacts/
mv artifacts/data-load-tool/target/data-load-tool-0.0.1-SNAPSHOT.jar artifacts/
docker build -t ${{ env.GSS_IMAGE }}:${SHORT_SHA} \
-f .github/workflows/docker/graphscope-store.Dockerfile .
- name: Prepare the log directory
run: |
# create the helm installation log directory
mkdir -p ${{ github.workspace }}/k8s-ci-helm-installation-logs
# create the demo fresh of helm installation log directory
mkdir -p ${{ github.workspace }}/k8s-ci-demo-fresh-of-helm-installation-logs
# create the demo script of helm installation with pv log directory
mkdir -p ${{ github.workspace }}/k8s-ci-demo-script-of-helm-installation-with-pv-logs
# create the helm test of helm installation with pv log directory
mkdir -p ${{ github.workspace }}/k8s-ci-helm-test-of-helm-installation-with-pv-logs
# create the demo after restart of helm installation with pv log directory
mkdir -p ${{ github.workspace }}/k8s-ci-demo-after-restart-of-helm-installation-with-pv-logs
- name: Setup SSH
run: |
ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ''
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod og-wx ~/.ssh/authorized_keys
echo "StrictHostKeyChecking no" >> ~/.ssh/config
sudo /etc/init.d/ssh start
- name: Create the kubernetes cluster
run: |
# download gstest
git clone -b master --single-branch --depth=1 https://github.com/7br/gstest.git ${GS_TEST_DIR}
minikube start --base-image='registry-vpc.cn-hongkong.aliyuncs.com/graphscope/kicbase:v0.0.30' \
--cpus='12' --memory='32000mb' --disk-size='40000mb'
minikube image load ${{ env.GSS_IMAGE }}:${SHORT_SHA}
- name: Start to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
SHOW_TIMESTAMPS: 'true'
OUTPUT_DIR: ${{ github.workspace }}/helm-installation-logs
NAMESPACES: "gs*,default"
MODE: start
- name: Install graphscope-store with helm
run: |
# update helm dependency
cd ${GITHUB_WORKSPACE}/charts/graphscope-store
helm dependency update
# helm deployment and testing
cd ${GITHUB_WORKSPACE}/charts
helm install ci --set image.tag=${SHORT_SHA} ./graphscope-store
helm test ci --timeout 5m0s
- name: Stop to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
MODE: stop
- name: Start to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
SHOW_TIMESTAMPS: 'true'
OUTPUT_DIR: ${{ github.workspace }}/demo-fresh-of-helm-installation-logs
NAMESPACES: "gs*,default"
MODE: start
- name: Test the helm deployment
run: |
# 1. get gss service endpoint
export GRPC_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services ci-graphscope-store-frontend)
export GREMLIN_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[1].nodePort}" services ci-graphscope-store-frontend)
export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
# 2. deploy hadoop hdfs
tar -zxf /home/runner/hadoop-2.10.1.tar.gz -C /tmp/
cd ${GITHUB_WORKSPACE}/.github/workflows/hadoop_scripts
./prepare_hadoop.sh /tmp/hadoop-2.10.1
export PATH=${PATH}:/tmp/hadoop-2.10.1/bin
# data-load-tool is needed for offline_load.sh
# see .github/workflows/hadoop_scripts/offline_load.sh
export LOADER_DIR=${GITHUB_WORKSPACE}/artifacts
export LOAD_DATA_SCRIPT=${GITHUB_WORKSPACE}/.github/workflows/hadoop_scripts/offline_load.sh
sed s/GRAPH_ENDPOINT/$NODE_IP:$GRPC_PORT/ databuild.config.template > databuild.config
# 3. upload data to HDFS
hadoop fs -mkdir /ldbc_sample || true
hadoop fs -put ${GS_TEST_DIR}/ldbc_sample/person_0_0.csv /ldbc_sample/person_0_0.csv
hadoop fs -put ${GS_TEST_DIR}/ldbc_sample/person_knows_person_0_0.csv /ldbc_sample/person_knows_person_0_0.csv
# python test
cd ${GITHUB_WORKSPACE}/python
python3 -m pip install -r ./requirements.txt --user
python3 -m pip install -r ./requirements-dev.txt --user
python3 -m pip install pytest-cov --user
python3 setup.py build_proto
python3 -m pytest -s -vvv graphscope/tests/kubernetes/test_store_service.py -k test_demo_fresh
- name: Stop to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
MODE: stop
- name: Start to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
SHOW_TIMESTAMPS: 'true'
OUTPUT_DIR: ${{ github.workspace }}/demo-script-of-helm-installation-with-pv-logs
NAMESPACES: "gs*,default"
MODE: start
- name: restart helm and run demo with the PersistentVolume
run: |
# restart helm and run demo with the PersistentVolume
helm uninstall ci
sleep 30
cd ${GITHUB_WORKSPACE}/charts
helm install ci --set image.tag=${SHORT_SHA} ./graphscope-store
- name: Stop to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
MODE: stop
- name: Start to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
SHOW_TIMESTAMPS: 'true'
OUTPUT_DIR: ${{ github.workspace }}/helm-test-of-helm-installation-with-pv-logs
NAMESPACES: "gs*,default"
MODE: start
- name: Helm Test with Helm Deployment and PersistentVolume
run: |
# helm test and python test on the restarted store
export GRPC_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services ci-graphscope-store-frontend)
export GREMLIN_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[1].nodePort}" services ci-graphscope-store-frontend)
export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
helm test ci --timeout 10m0s
- name: Stop to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
MODE: stop
- name: Start to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
SHOW_TIMESTAMPS: 'true'
OUTPUT_DIR: ${{ github.workspace }}/demo-after-restart-of-helm-installation-with-pv-logs
NAMESPACES: "gs*,default"
MODE: start
- name: Python Test with Helm Deployment and PersistentVolume
run: |
export GRPC_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services ci-graphscope-store-frontend)
export GREMLIN_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[1].nodePort}" services ci-graphscope-store-frontend)
export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
cd ${GITHUB_WORKSPACE}/python
python3 -m pytest -s -vvv graphscope/tests/kubernetes/test_store_service.py -k test_demo_after_restart
- name: upload the k8s logs to artifact
if: ${{ always() }}
uses: actions/upload-artifact@v3
with:
name: k8s-test-logs
path: ${{ github.workspace }}/k8s-ci-*-logs
- name: Stop to export kubernetes logs
uses: dashanji/kubernetes-log-export-action@v4
env:
MODE: stop