Taskfile.yml
# https://taskfile.dev
version: '2'
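
# Environment variables exported to every task command. ENV, DOCKER_REGISTRY and
# NAMESPACE fall back to the defaults below unless overridden from the shell;
# COMMIT is resolved dynamically from the current git HEAD.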
env:
  DOCKER_IMAGE: k8s-spark-example
  ENV: '{{default "minikube" .ENV}}'
  DOCKER_REGISTRY: '{{default "dcr.flix.tech" .DOCKER_REGISTRY}}'
  NAMESPACE: '{{default "default" .NAMESPACE}}'
  COMMIT:
    sh: git rev-parse --short HEAD
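
# Template variables. DEFAULT_SERVICES is a newline-separated list that the
# spark.helm.* tasks split with splitLines; pass SERVICES to target a subset.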
vars:
  KUBE_CONTEXT: '{{default "minikube" .KUBE_CONTEXT}}'
  HELM_CHART_NAME: k8s-spark-example-helm
  DEFAULT_SERVICES: |-
    k8s-spark-local-example
    k8s-spark-client-example
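
# Included taskfiles: tasks from ./dags and ./TaskfileDocker.yml are exposed
# under the dags: and docker: namespaces (e.g. `task docker:build` in run.local).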
includes:
  dags: ./dags
  docker: ./TaskfileDocker.yml

tasks:
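  # Remove Python bytecode, cache and editor backup files from the working tree.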
  clean-pyc:
    cmds:
      - find . -name '*.pyc' -exec rm -f {} +
      - find . -name '*.pyo' -exec rm -f {} +
      - find . -name '*~' -exec rm -f {} +
      - find . -name '__pycache__' -exec rm -fr {} +
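
  # Uninstall every package currently installed in the active Python environment.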
  clean-pip:
    cmds:
      - pip freeze | xargs pip uninstall -y
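
  # Lint the python directory; --exit-zero keeps the task green even when pylint reports issues.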
  lint:
    cmds:
      - pylint --exit-zero python
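
  # Install pre-commit and run all hooks against the whole repository.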
  format:
    cmds:
      - pip install pre-commit
      - pre-commit install
      - pre-commit run --all-files
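
  # Build the Docker image inside minikube's Docker daemon and deploy the Helm releases there.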
  run.local:
    deps: [set.k8s.context]
    cmds:
      - |
        eval $(minikube docker-env)
        task docker:build
        task spark.helm.deploy
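
  # Start minikube unless it is already running.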
  start.minikube:
    cmds:
      - if minikube status | grep Running; then echo "minikube running..."; else echo "starting minikube"; minikube start; fi
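
  # Report whether the configured KUBE_CONTEXT targets the local minikube or a remote cluster.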
  default:
    cmds:
      - echo 'executed on {{if ne .KUBE_CONTEXT "minikube"}}remote{{else}}local{{end}}'
    silent: true
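
  # Install or upgrade one Helm release per service listed in SERVICES, pinning the image tag to the current commit.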
  spark.helm.deploy:
    summary: |
      When run locally, you need to specify environment variables,
      e.g. KUBE_CONTEXT=ew1p3 NAMESPACE=data-flux ENV=prod SERVICES=k8s-spark-example task spark.helm.deploy
    deps: [set.k8s.context]
    cmds:
      - >
        {{$helm_chart_name := .HELM_CHART_NAME}}
        {{range $service := .SERVICES | trim | splitLines -}}
        helm upgrade {{$service}} {{$helm_chart_name}} \
          --namespace $NAMESPACE \
          --install \
          --atomic \
          --cleanup-on-fail \
          --force \
          --history-max 3 \
          --wait \
          --timeout 60s \
          --set-string spark_driver.image.repository=$DOCKER_REGISTRY/data/flux/$DOCKER_IMAGE \
          --set-string spark_driver.image.tag=$COMMIT \
          --values ./helm-values/{{$service}}/values-$ENV.yaml;
        {{end}}
    vars:
      SERVICES: "{{default .DEFAULT_SERVICES .SERVICES}}"
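
  # Render the chart for each service without installing anything, to inspect the generated manifests.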
  spark.helm.template:
    deps: [set.k8s.context]
    cmds:
      - >
        {{$helm_chart_name := .HELM_CHART_NAME}}
        {{range $service := .SERVICES | trim | splitLines -}}
        helm template {{$service}} {{$helm_chart_name}} \
          --namespace $NAMESPACE \
          --set-string spark_driver.image.repository=$DOCKER_REGISTRY/data/flux/$DOCKER_IMAGE \
          --set-string spark_driver.image.tag=$COMMIT \
          --values ./helm-values/{{$service}}/values-$ENV.yaml;
        {{end}}
    vars:
      SERVICES: "{{default .DEFAULT_SERVICES .SERVICES}}"
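
  # Remove the Helm releases listed in SERVICES from the target namespace.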
  spark.helm.undeploy:
    summary: |
      When run locally, you need to specify environment variables,
      e.g. KUBE_CONTEXT=ew1p3 NAMESPACE=data-flux ENV=prod SERVICES=k8s-spark-example task spark.helm.undeploy
    deps: [set.k8s.context]
    cmds:
      - >
        {{range $service := .SERVICES | trim | splitLines -}}
          helm uninstall --namespace $NAMESPACE {{$service}}
        {{end}}
    vars:
      SERVICES: "{{default .DEFAULT_SERVICES .SERVICES}}"
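
  # Point kubectl at the configured context and namespace; used as a dependency by the deploy tasks.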
  set.k8s.context:
    cmds:
      - kubectl config set-context {{.KUBE_CONTEXT}} --namespace=$NAMESPACE
      - kubectl config use-context {{.KUBE_CONTEXT}} --namespace=$NAMESPACE