# Duplicate this file and put your customization here
##
## common settings and settings for the webserver
airflow:
  extraConfigmapMounts: []
  # - name: extra-metadata
  #   mountPath: /opt/metadata
  #   configMap: airflow-metadata
  #   readOnly: true
  #
  # Example of a configmap mount with subPath
  # - name: extra-metadata
  #   mountPath: /opt/metadata/file.yaml
  #   configMap: airflow-metadata
  #   readOnly: true
  #   subPath: file.yaml
  ##
  ## Extra environment variables to mount in the web, scheduler, and worker pods:
  extraEnv:
    - name: AWS_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: airflow-hubble
          key: aws_access_key_id
    - name: AWS_SECRET_ACCESS_KEY
      valueFrom:
        secretKeyRef:
          name: airflow-hubble
          key: aws_secret_access_key
    # Production PG User
    - name: PROD_PGUSER
      valueFrom:
        secretKeyRef:
          name: airflow-hubble
          key: prod_pguser
    # Production PG Password
    - name: PROD_PGPASS
      valueFrom:
        secretKeyRef:
          name: airflow-hubble
          key: prod_pgpass
    # Development PG User
    - name: DEV_PGUSER
      valueFrom:
        secretKeyRef:
          name: airflow-hubble
          key: dev_pguser
    # Development PG Password
    - name: DEV_PGPASS
      valueFrom:
        secretKeyRef:
          name: airflow-hubble
          key: dev_pgpass
    # Redshift User
    - name: REDSHIFT_USER
      valueFrom:
        secretKeyRef:
          name: airflow-hubble
          key: redshift_user
    # Redshift Password
    - name: REDSHIFT_PASS
      valueFrom:
        secretKeyRef:
          name: airflow-hubble
          key: redshift_pass
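  ##
  ## The `airflow-hubble` secret referenced above is expected to exist already.
  ## A minimal sketch of how such a secret could be created out-of-band
  ## (illustrative only; supply your real credentials for every key used above):
  ## kubectl create secret generic airflow-hubble \
  ##   --from-literal=aws_access_key_id=... \
  ##   --from-literal=aws_secret_access_key=... \
  ##   --from-literal=prod_pguser=... \
  ##   --from-literal=prod_pgpass=...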
  ##
  ## You will need to define your fernet key:
  ## Generate fernetKey with:
  ## python -c "from cryptography.fernet import Fernet; FERNET_KEY = Fernet.generate_key().decode(); print(FERNET_KEY)"
  ## fernetKey: ABCDABCDABCDABCDABCDABCDABCDABCDABCDABCD
  fernetKey: "ttbraK5ptmedcL9yP2NPP5w7xpWfZDSfim0a2MWzuZ4="
  service:
    annotations: {}
    type: ClusterIP
    externalPort: 8080
  ##
  ## The executor to use.
  ##
  executor: Celery
  ##
  ## set the max number of retries during container initialization
  initRetryLoop:
  ##
  ## base image for webserver/scheduler/workers
  ## Note: If you want to use airflow HEAD (2.0dev), use the following image:
  # image:
  #   repository: stibbons31/docker-airflow-dev
  #   tag: 2.0dev
  ## Airflow 2.0 allows changing the values ingress.web.path and ingress.flower.path (see below).
  ## In versions < 2.0, changing these paths has no effect.
  image:
    ##
    ## docker-airflow image
    repository: 175416825336.dkr.ecr.ap-southeast-1.amazonaws.com/airflow
    ##
    ## image tag
    tag: hubble-1.0.4
    ##
    ## Image pull policy
    ## values: Always or IfNotPresent
    pullPolicy: IfNotPresent
    ##
    ## image pull secret for private images
    pullSecret:
  ##
  ## Set schedulerNumRuns to control how the scheduler behaves:
  ## -1 lets it loop indefinitely, but it will never update the DAGs;
  ## 1 makes the scheduler quit after each refresh, and Kubernetes will restart it.
  ##
  ## A long-running scheduler process, at least with the CeleryExecutor, ends up not scheduling
  ## some tasks. We still don't know the exact cause, unfortunately. Airflow has a built-in
  ## workaround in the form of the `num_runs` flag.
  ## Airflow runs with num_runs set to 5.
  ##
  ## If set to a value != -1, you will see your scheduler restart regularly. This is its normal
  ## behavior under these conditions.
  schedulerNumRuns: "-1"
  ##
  ## Set schedulerDoPickle to toggle whether the scheduler
  ## attempts to pickle the DAG object to send over to the workers,
  ## instead of letting the workers run their version of the code.
  ## See the Airflow documentation for the --do_pickle argument: https://airflow.apache.org/cli.html#scheduler
  schedulerDoPickle: true
  ##
  ## Number of replicas for the web server.
  webReplicas: 1
  ##
  ## Custom airflow configuration environment variables
  ## Use this to override any airflow setting by defining environment variables in the
  ## following form: AIRFLOW__<section>__<key>.
  ## See the Airflow documentation: https://airflow.readthedocs.io/en/stable/howto/set-config.html?highlight=setting-configuration
  ## Example:
  ## config:
  ##   AIRFLOW__CORE__EXPOSE_CONFIG: "True"
  ##   HTTP_PROXY: "http://proxy.mycompany.com:123"
  config: {}
  ##
  ## Configure the pod disruption budget for the scheduler
  podDisruptionBudget:
    maxUnavailable: 1
  ## Add custom connections
  ## Use this to add Airflow connections for the operators you use.
  ## For each connection, the id and type have to be defined;
  ## all the other parameters are optional.
  ## Connections will be created with a script that is stored
  ## in a K8s secret and mounted into the scheduler container.
  ## Example:
  ## connections:
  ##   - id: my_aws
  ##     type: aws
  ##     extra: '{"aws_access_key_id": "**********", "aws_secret_access_key": "***", "region_name":"eu-central-1"}'
  connections: []
  ## Add airflow variables
  ## This should be a json string with your variables in it
  ## Example:
  ## variables: '{ "environment": "dev" }'
  variables: {}
  ## Add airflow pools
  ## This should be a json string with your pools in it
  ## Example:
  ## pools: '{ "example": { "description": "This is an example of a pool", "slots": 2 } }'
  pools: {}
  ##
  ## Annotations for the Scheduler, Worker and Web pods
  podAnnotations: {}
  ## Example:
  ## iam.amazonaws.com/role: airflow-Role
  extraContainers: []
  ## Additional containers to run alongside the Scheduler, Worker and Web pods
  ## This could, for example, be used to run a sidecar that syncs DAGs from object storage.
  # - name: s3-sync
  #   image: my-user/s3sync:latest
  #   volumeMounts:
  #     - name: synchronised-dags
  #       mountPath: /dags
  extraVolumeMounts:
    - name: shm-volume
      mountPath: /dev/shm
    - name: googlesheets-secret
      readOnly: true
      mountPath: "/etc/googlesheets_secret"
  ## Additional volumeMounts for the main containers in the Scheduler, Worker and Web pods.
  # - name: synchronised-dags
  #   mountPath: /usr/local/airflow/dags
  extraVolumes:
    - name: shm-volume
      emptyDir:
        medium: Memory
    - name: googlesheets-secret
      secret:
        secretName: googlesheets
  ## Additional volumes for the Scheduler, Worker and Web pods.
  # - name: synchronised-dags
  #   emptyDir: {}
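  ##
  ## The `googlesheets` secret mounted above is expected to exist already.
  ## A minimal sketch of how it could be created (file name is hypothetical):
  ## kubectl create secret generic googlesheets \
  ##   --from-file=credentials.json=./googlesheets-credentials.json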
  ##
  ## Run initdb when the scheduler starts.
  initdb: true
scheduler:
  resources:
    limits:
      cpu: "1000m"
      memory: "1Gi"
    requests:
      cpu: "500m"
      memory: "512Mi"
  ## Support nodeSelector, affinity and tolerations for scheduler pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
  nodeSelector:
    nodegroup: development
  affinity: {}
  tolerations: []
flower:
  resources:
    limits:
      cpu: "100m"
      memory: "128Mi"
    requests:
      cpu: "100m"
      memory: "128Mi"
  service:
    annotations: {}
    type: ClusterIP
    externalPort: 5555
  ## Support nodeSelector, affinity and tolerations for flower pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
  nodeSelector:
    nodegroup: development
  affinity: {}
  tolerations: []
web:
  resources:
    limits:
      cpu: "300m"
      memory: "1Gi"
    requests:
      cpu: "100m"
      memory: "512Mi"
  initialStartupDelay: "60"
  initialDelaySeconds: "360"
  ## Support nodeSelector, affinity and tolerations for web pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
  nodeSelector:
    nodegroup: development
  affinity: {}
  tolerations: []
  ##
  ## Directory in which to mount secrets on webserver nodes.
  secretsDir: /var/airflow/secrets
  ##
  ## Secrets which will be mounted as a file at `secretsDir/<secret name>`.
  secrets: []
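  ## Example (a sketch with a hypothetical secret name; each listed secret must
  ## already exist in the namespace and is mounted at `secretsDir/<secret name>`):
  ## secrets:
  ##   - redshift-credentials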
##
## Workers configuration
workers:
  enabled: true
  ##
  ## Number of worker pods to launch
  replicas: 1
  ##
  ## Custom resource configuration
  resources:
    limits:
      cpu: "1"
      memory: "2G"
    requests:
      cpu: "0.5"
      memory: "512Mi"
  ##
  ## Annotations for the Worker pods
  podAnnotations: {}
  ## Example:
  ## iam.amazonaws.com/role: airflow-Role
  ##
  ## Celery worker configuration
  celery:
    ##
    ## number of parallel celery tasks per worker
    instances: 1
  ##
  ## Directory in which to mount secrets on worker nodes.
  secretsDir: /var/airflow/secrets
  ##
  ## Secrets which will be mounted as a file at `secretsDir/<secret name>`.
  secrets: []
  ## Support nodeSelector, affinity and tolerations for worker pod assignment
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
  nodeSelector:
    nodegroup: development
  affinity: {}
  tolerations: []
##
## Ingress configuration
ingress:
  ##
  ## enable ingress
  ## Note: If you want to change the url prefix for the web ui or flower even if you do not
  ## use ingress, you can still change ingress.web.path and ingress.flower.path
  enabled: true
  ##
  ## Configure the webserver endpoint
  web:
    ## NOTE: This requires an airflow version > 1.9.x
    ## For the moment (March 2018) this is **not** available in the official package; you will
    ## have to use an image where airflow has been updated to its current HEAD.
    ## You can use the following one:
    ## stibbons31/docker-airflow-dev:2.0dev
    ##
    ## if path is '/airflow':
    ##  - the UI will be accessible at 'http://mycompany.com/airflow/admin'
    ##  - the healthcheck is at 'http://mycompany.com/airflow/health'
    ##  - the api is at 'http://mycompany.com/airflow/api'
    ## NOTE: do NOT keep a trailing slash. For the root configuration, set an empty string.
    path: ""
    ##
    ## hostname for the webserver
    host: "dev-airflow.hubble.sg"
    ##
    ## Annotations for the webserver
    ## The Airflow webserver handles relative paths completely; just have your load balancer
    ## pass along HTTP headers such as the requested URL (no special configuration needed).
    annotations:
      external-dns.alpha.kubernetes.io/aws-zone-type-public: "true"
      kubernetes.io/ingress.class: nginx-external
      nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
      nginx.ingress.kubernetes.io/whitelist-source-range: "158.140.140.248/32, 172.31.0.0/16, 10.10.0.0/16"
    ##
    ## Configure the web liveness path.
    ## Defaults to the templated value `{{ ingress.web.path }}/health`
    livenessPath:
    tls:
      ## Set to "true" to enable TLS termination at the ingress
      enabled: false
      ## If enabled, set "secretName" to the secret containing the TLS private key and certificate
      ## Example:
      ## secretName: example-com-crt
  ##
  ## Configure the flower endpoint
  flower:
    ##
    ## If the flower path is '/airflow/flower':
    ##  - the Flower UI is at 'http://mycompany.com/airflow/flower'
    ## NOTE: you need a reverse proxy/load balancer able to do URL rewriting in order to have
    ## flower mounted on a path other than root. Flower only does half the job of url prefixing:
    ## it only generates the right URLs/relative paths in the **returned HTML files**, but
    ## expects the request to have been made at the root.
    ## That is why we need a reverse proxy/load balancer that is able to strip the path.
    ## NOTE: do NOT keep a trailing slash. For the root configuration, set an empty string.
    path: "/flower"
    ##
    ## Configure the liveness path. Keep "/" for Flower versions from January 2018 onwards;
    ## for earlier versions, use the same path as the 'path' key above.
    ## NOTE: keep the trailing slash.
    livenessPath: /
    ##
    ## hostname for flower
    host: "dev-airflow.hubble.sg"
    ##
    ## Annotations for the Flower endpoint
    ##
    annotations:
      external-dns.alpha.kubernetes.io/aws-zone-type-public: "true"
      kubernetes.io/ingress.class: nginx-external
      nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
      nginx.ingress.kubernetes.io/whitelist-source-range: "158.140.140.248/32, 172.31.0.0/16, 10.10.0.0/16"
    tls:
      ## Set to "true" to enable TLS termination at the ingress
      enabled: false
      ## If enabled, set "secretName" to the secret containing the TLS private key and certificate
      ## Example:
      ## secretName: example-com-crt
##
## Storage configuration for DAGs
persistence:
  ##
  ## enable persistent storage
  enabled: false
  ##
  ## Existing claim to use
  # existingClaim: nil
  ## Existing claim's subPath to use, e.g. "dags" (optional)
  # subPath: ""
  ##
  ## Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack).
  # storageClass: default
  accessMode: ReadWriteOnce
  ##
  ## Persistent storage size request
  size: 1Gi
##
## Storage configuration for logs
logsPersistence:
  ##
  ## enable persistent storage
  enabled: true
  ##
  ## Existing claim to use
  # existingClaim: nil
  ## Existing claim's subPath to use, e.g. "logs" (optional)
  # subPath: ""
  ##
  ## Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack).
  ##
  ## A configuration for shared log storage requires a `storageClass` that
  ## supports the `ReadWriteMany` accessMode, such as NFS or AWS EFS.
  # storageClass: default
  accessMode: ReadWriteOnce
  ##
  ## Persistent storage size request
  size: 1Gi
##
## Configure DAGs deployment and update
dags:
  ##
  ## mount path for the persistent volume.
  ## Note that this location is referred to in airflow.cfg, so if you change it, you must update airflow.cfg accordingly.
  path: /usr/local/airflow/dags
  ##
  ## Set to true to prevent pickling DAGs from scheduler to workers
  doNotPickle: false
  ##
  ## Configure the Git repository to fetch DAGs from
  git:
    ##
    ## url to clone the git repository
    url:
    ##
    ## branch name, tag or sha1 to reset to
    ref: master
    ## pre-created secret with key, key.pub and known_hosts file for private repos
    secret: {}
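    ## A minimal sketch of how such a secret could be created out-of-band
    ## (the secret name and local file paths are hypothetical; the key names
    ## key, key.pub and known_hosts follow the comment above):
    ## kubectl create secret generic airflow-git-keys \
    ##   --from-file=key=./id_rsa \
    ##   --from-file=key.pub=./id_rsa.pub \
    ##   --from-file=known_hosts=./known_hosts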
  initContainer:
    ## Fetch the source code when the pod starts
    enabled: false
    ## Image for the init container (any image with git will do)
    image:
      ##
      ## init container image
      repository: alpine/git
      ##
      ## image tag
      tag: 1.0.7
      ##
      ## Image pull policy
      ## values: Always or IfNotPresent
      pullPolicy: IfNotPresent
    ## install requirements.txt dependencies automatically
    installRequirements: true
##
## Configure logs
logs:
  path: /usr/local/airflow/logs
##
## Enable RBAC
rbac:
  ##
  ## Specifies whether RBAC resources should be created
  create: true
##
## Create or use a ServiceAccount
serviceAccount:
  ##
  ## Specifies whether a ServiceAccount should be created
  create: true
  ## The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the fullname template
  name:
##
## Configuration values for the postgresql dependency.
## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
postgresql:
  ##
  ## Use the PostgreSQL chart dependency.
  ## Set to false if bringing your own PostgreSQL.
  enabled: true
  ##
  ## The name of an existing secret that contains the postgres password.
  existingSecret:
  ## Name of the key containing the secret.
  existingSecretKey: postgres-password
  ##
  ## If you are bringing your own PostgreSQL, you should set postgresHost and
  ## probably also service.port, postgresUser, postgresPassword, and postgresDatabase.
  ## postgresHost:
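  ##
  ## Example for an external database (hypothetical host; remember to also set
  ## enabled: false above so the chart dependency is not deployed):
  ## postgresHost: airflow-db.example.internal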
  ##
  ## PostgreSQL port
  service:
    port: 5432
  ## PostgreSQL user to create.
  postgresUser: postgres
  ##
  ## PostgreSQL password for the new user.
  ## If not set, a random 10-character password will be used.
  postgresPassword: airflow
  ##
  ## PostgreSQL database to create.
  postgresDatabase: airflow
  ##
  ## Persistent Volume Storage configuration.
  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes
  persistence:
    ##
    ## Enable PostgreSQL persistence using Persistent Volume Claims.
    enabled: true
    ##
    ## Persistent storage class
    # storageClass: classname
    ##
    ## Access mode:
    accessMode: ReadWriteOnce
  nodeSelector:
    nodegroup: development
## Configuration values for the Redis dependency.
## ref: https://github.com/kubernetes/charts/blob/master/stable/redis/README.md
redis:
  ##
  ## Use the redis chart dependency.
  ## Set to false if bringing your own redis.
  enabled: true
  ##
  ## The name of an existing secret that contains the redis password.
  existingSecret:
  ## Name of the key containing the secret.
  existingSecretKey: redis-password
  ##
  ## If you are bringing your own redis, you can set the host in redisHost.
  ## redisHost:
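  ##
  ## Example for an external redis (hypothetical host; remember to also set
  ## enabled: false above so the chart dependency is not deployed):
  ## redisHost: airflow-redis.example.internal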
  ##
  ## Redis password
  ##
  password: airflow
  ##
  ## Master configuration
  master:
    ##
    ## Image configuration
    # image:
    ##
    ## docker registry secret names (list)
    # pullSecrets: nil
    ##
    ## Configure persistence
    persistence:
      ##
      ## Use a PVC to persist data.
      enabled: false
      ##
      ## Persistent storage class
      # storageClass: classname
      ##
      ## Access mode:
      accessMode: ReadWriteOnce
    nodeSelector:
      nodegroup: development
  ##
  ## Disable cluster management by default.
  cluster:
    enabled: false
# Enable this if you're using https://github.com/coreos/prometheus-operator
# Don't forget that you need to install something like https://github.com/epoch8/airflow-exporter in your airflow docker container
serviceMonitor:
  enabled: false
  interval: "30s"
  path: /admin/metrics
  ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
  selector:
    prometheus: kube-prometheus
# Enable this if you're using https://github.com/coreos/prometheus-operator
prometheusRule:
  enabled: false
  ## Namespace in which the prometheus rule is created
  # namespace: monitoring
  ## Define individual alerting rules as required
  ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup
  ##      https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
  groups: {}
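  ## Example (a minimal sketch; the alert name, expression and job label are
  ## illustrative and must be adapted to the metrics your exporter exposes):
  ## groups:
  ##   - name: airflow
  ##     rules:
  ##       - alert: AirflowSchedulerDown
  ##         expr: up{job="airflow"} == 0
  ##         for: 5m
  ##         labels:
  ##           severity: critical
  ##         annotations:
  ##           summary: "Airflow scheduler is down"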
  ## Labels used by the Prometheus installed in your cluster to select which
  ## PrometheusRule resources to pick up
  ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
  additionalLabels: {}