diff --git a/Tiltfile b/Tiltfile index eaa01f7b1..20af2934a 100644 --- a/Tiltfile +++ b/Tiltfile @@ -76,6 +76,8 @@ local_resource( helm cm-push addons/redis-managed local && \ helm cm-push addons/deepgram local && \ helm cm-push addons/hf-llm-models local && \ + helm cm-push addons/keda-http-add-on local && \ + helm cm-push addons/kube-image-keeper local && \ helm repo update local ''', deps=[ diff --git a/addons/hf-llm-models/templates/deployment.yaml b/addons/hf-llm-models/templates/deployment.yaml index 4b4e7de1d..693928221 100644 --- a/addons/hf-llm-models/templates/deployment.yaml +++ b/addons/hf-llm-models/templates/deployment.yaml @@ -2,21 +2,21 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - llm-model: {{ .Release.Name }} + llm-model: {{ .Release.Name }}-hf-llm annotations: porter.run/hf-llm-model-version: "{{ .Chart.Version }}" - name: {{ .Release.Name }}-workload + name: {{ .Release.Name }}-hf-llm spec: replicas: 1 strategy: type: Recreate selector: matchLabels: - llm-model: {{ .Release.Name }} + llm-model: {{ .Release.Name }}-hf-llm template: metadata: labels: - llm-model: {{ .Release.Name }} + llm-model: {{ .Release.Name }}-hf-llm spec: tolerations: - key: "removable" @@ -49,7 +49,7 @@ spec: - --max-model-len={{ .Values.maxModelLen }} {{- end }} image: {{ .Values.vllmImage }} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent env: - name: HF_TOKEN value: {{ .Values.huggingFaceToken }} @@ -57,6 +57,16 @@ spec: - containerPort: 8000 protocol: TCP name: https + readinessProbe: + failureThreshold: 3 + httpGet: + path: /health + port: 8000 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 2 resources: requests: {{- if .Values.resources.requests.cpu }} @@ -88,4 +98,4 @@ spec: volumes: - name: model-volume persistentVolumeClaim: - claimName: "{{ .Release.Name }}-model-pvc" + claimName: "{{ .Release.Name }}-hf-llm" diff --git a/addons/hf-llm-models/templates/httpscaledobject.yaml 
b/addons/hf-llm-models/templates/httpscaledobject.yaml new file mode 100644 index 000000000..bf777809c --- /dev/null +++ b/addons/hf-llm-models/templates/httpscaledobject.yaml @@ -0,0 +1,20 @@ +{{ if .Values.autoscaling.enabled }} +kind: HTTPScaledObject +apiVersion: http.keda.sh/v1alpha1 +metadata: + name: {{ .Release.Name }}-hf-llm +spec: + hosts: + - {{ .Release.Name }}.porter.llm + scaleTargetRef: + deployment: {{ .Release.Name }}-hf-llm + service: {{ .Release.Name }}-hf-llm + port: 8000 + replicas: + min: {{ .Values.autoscaling.min }} + max: {{ .Values.autoscaling.max }} + scaledownPeriod: {{ .Values.autoscaling.scaledownPeriod }} + scalingMetric: + concurrency: + targetValue: {{ .Values.autoscaling.targetConcurrency }} +{{- end }} \ No newline at end of file diff --git a/addons/hf-llm-models/templates/pvc.yaml b/addons/hf-llm-models/templates/pvc.yaml index 3d9b2ae1f..997e09b85 100644 --- a/addons/hf-llm-models/templates/pvc.yaml +++ b/addons/hf-llm-models/templates/pvc.yaml @@ -1,11 +1,11 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: {{ .Release.Name }}-model-pvc + name: {{ .Release.Name }}-hf-llm spec: accessModes: - ReadWriteMany - storageClassName: efs-{{ .Release.Name }} + storageClassName: efs-{{ .Release.Name }}-hf-llm resources: requests: storage: 20Gi \ No newline at end of file diff --git a/addons/hf-llm-models/templates/service.yaml b/addons/hf-llm-models/templates/service.yaml index 76a1d5db1..8b4d13376 100644 --- a/addons/hf-llm-models/templates/service.yaml +++ b/addons/hf-llm-models/templates/service.yaml @@ -2,12 +2,12 @@ apiVersion: v1 kind: Service metadata: labels: - llm-model: {{ .Release.Name }} - name: {{ .Release.Name }} + llm-model: {{ .Release.Name }}-hf-llm + name: {{ .Release.Name }}-hf-llm spec: ports: - name: https port: 8000 targetPort: https selector: - llm-model: {{ .Release.Name }} \ No newline at end of file + llm-model: {{ .Release.Name }}-hf-llm \ No newline at end of file diff --git 
a/addons/hf-llm-models/templates/storageclass.yaml b/addons/hf-llm-models/templates/storageclass.yaml index 49d6c6876..0acf0329f 100644 --- a/addons/hf-llm-models/templates/storageclass.yaml +++ b/addons/hf-llm-models/templates/storageclass.yaml @@ -1,7 +1,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: efs-{{ .Release.Name }} + name: efs-{{ .Release.Name }}-hf-llm provisioner: efs.csi.aws.com parameters: provisioningMode: efs-ap diff --git a/addons/hf-llm-models/values.yaml b/addons/hf-llm-models/values.yaml index 479158301..3b65d4980 100644 --- a/addons/hf-llm-models/values.yaml +++ b/addons/hf-llm-models/values.yaml @@ -29,4 +29,11 @@ resources: tolerations: - key: "nvidia.com/gpu" operator: "Exists" - effect: "NoSchedule" \ No newline at end of file + effect: "NoSchedule" + +autoscaling: + enabled: false + min: 0 + max: 10 + scaledownPeriod: 300 # the time in seconds to wait before scaling down the deployment after the last request + targetConcurrency: 100 # the target concurrent connections per replica \ No newline at end of file diff --git a/addons/keda-http-add-on/.helmignore b/addons/keda-http-add-on/.helmignore new file mode 100644 index 000000000..0b24bcc19 --- /dev/null +++ b/addons/keda-http-add-on/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +*.gotmpl \ No newline at end of file diff --git a/addons/keda-http-add-on/Chart.yaml b/addons/keda-http-add-on/Chart.yaml new file mode 100644 index 000000000..ceaa78f46 --- /dev/null +++ b/addons/keda-http-add-on/Chart.yaml @@ -0,0 +1,31 @@ +apiVersion: v2 +type: application +name: keda-add-ons-http +description: Event-based autoscaler for HTTP workloads on Kubernetes + +# Specify the Kubernetes version range that we support. +# We allow pre-release versions for cloud-specific Kubernetes versions such as v1.21.5-gke.1302 or v1.18.9-eks-d1db3c +kubeVersion: ">=v1.23.0-0" + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. This is incremented at chart release time and does not need +# to be included in any PRs to main. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.8.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: 0.8.0 +home: https://github.com/kedacore/http-add-on +sources: + - https://github.com/kedacore/http-add-on +maintainers: + - name: Ahmed ElSayed + email: ahmels@microsoft.com + - name: Jorge Turrado + email: jorge_turrado@hotmail.es + - name: Tom Kerkhove + email: kerkhove.tom@gmail.com + - name: Zbynek Roubalik + email: zbynek@kedify.io \ No newline at end of file diff --git a/addons/keda-http-add-on/README.md b/addons/keda-http-add-on/README.md new file mode 100644 index 000000000..6fce615bf --- /dev/null +++ b/addons/keda-http-add-on/README.md @@ -0,0 +1,255 @@ +

+ +

Kubernetes-based Event Driven Autoscaling - HTTP Add-On

+

+ +The KEDA HTTP Add On allows Kubernetes users to automatically scale their HTTP servers up and down (including to/from zero) based on incoming HTTP traffic. Please see our [use cases document](./docs/use_cases.md) to learn more about how and why you would use this project. + +| 🚧 **Alpha - Not for production** 🚧| +|---------------------------------------------| +| ⚠ The HTTP add-on is in [experimental stage](https://github.com/kedacore/keda/issues/538) and not ready for production.

It is provided as-is without support. + +>This codebase moves very quickly. We can't currently guarantee that any part of it will work. Neither the complete feature set nor known issues may be fully documented. Similarly, issues filed against this project may not be responded to quickly or at all. **We will release and announce a beta release of this project**, and after we do that, we will document and respond to issues properly. + +## Walkthrough + +Although this is an **alpha release** project right now, we have prepared a walkthrough document that with instructions on getting started for basic usage. + +See that document at [docs/walkthrough.md](https://github.com/kedacore/http-add-on/tree/main/docs/walkthrough.md) + +## Design + +The HTTP add-on is composed of multiple mostly independent components. This design was chosen to allow for highly +customizable installations while allowing us to ship reasonable defaults. + +- We have written a complete design document. Please see it at [docs/design.md](https://github.com/kedacore/http-add-on/tree/main/docs/design.md). +- For more context on the design, please see our [scope document](https://github.com/kedacore/http-add-on/tree/main/docs/scope.md). +- If you have further questions about the project, please see our [FAQ document](https://github.com/kedacore/http-add-on/tree/main/docs/faq.md). + +## Installation + +Please see the [complete installation instructions](https://github.com/kedacore/http-add-on/tree/main/docs/install.md). + +## Contributing + +Please see the [contributing documentation for all instructions](https://github.com/kedacore/http-add-on/tree/main/docs/contributing.md). + +--- +We are a Cloud Native Computing Foundation (CNCF) graduated project. +

+ +--- + +## TL;DR + +```console +helm repo add kedacore https://kedacore.github.io/charts +helm repo update + +helm install http-add-on kedacore/keda-add-ons-http --create-namespace --namespace keda --version 0.8.0 +``` + +## Introduction + +This chart bootstraps KEDA HTTP Add-on infrastructure on a Kubernetes cluster using the Helm package manager. + +As part of that, it will install all the required Custom Resource Definitions (CRD). + +## Installing the Chart + +To install the chart with the release name `http-add-on`, please read the [install instructions on the official repository to get started](https://github.com/kedacore/http-add-on/tree/main/docs/install.md): + +```console +$ helm install http-add-on kedacore/keda-add-ons-http --namespace keda +``` + +> **Important:** This chart **needs** KEDA installed in your cluster to work properly. + +## Uninstalling the Chart + +To uninstall/delete the `http-add-on` Helm chart: + +```console +helm uninstall http-add-on +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the HTTP Add-On chart and +their default values. + +### General parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `additionalLabels` | object | `{}` | Additional labels to be applied to installed resources. Note that not all resources will receive these labels. 
| +| `crds.install` | bool | `true` | Whether to install the `HTTPScaledObject` [`CustomResourceDefinition`](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) | +| `images.interceptor` | string | `"ghcr.io/kedacore/http-add-on-interceptor"` | Image name for the interceptor image component | +| `images.kubeRbacProxy.name` | string | `"gcr.io/kubebuilder/kube-rbac-proxy"` | Image name for the Kube RBAC Proxy image component | +| `images.kubeRbacProxy.tag` | string | `"v0.13.0"` | Image tag for the Kube RBAC Proxy image component | +| `images.operator` | string | `"ghcr.io/kedacore/http-add-on-operator"` | Image name for the operator image component | +| `images.scaler` | string | `"ghcr.io/kedacore/http-add-on-scaler"` | Image name for the scaler image component | +| `images.tag` | string | `""` | Image tag for the http add on. This tag is applied to the images listed in `images.operator`, `images.interceptor`, and `images.scaler`. Optional, given app version of Helm chart is used by default | +| `logging.interceptor.format` | string | `"console"` | Logging format for KEDA http-add-on Interceptor. allowed values: `json` or `console` | +| `logging.interceptor.level` | string | `"info"` | Logging level for KEDA http-add-on Interceptor. allowed values: `debug`, `info`, `error`, or an integer value greater than 0, specified as string | +| `logging.interceptor.timeEncoding` | string | `"rfc3339"` | Logging time encoding for KEDA http-add-on Interceptor. allowed values are `epoch`, `millis`, `nano`, `iso8601`, `rfc3339` or `rfc3339nano` | +| `logging.operator.format` | string | `"console"` | Logging format for KEDA http-add-on operator. 
allowed values: `json` or `console` | +| `logging.operator.kubeRbacProxy.level` | int | `10` | Logging level for KEDA http-add-on operator rbac proxy allowed values: `0` for info, `4` for debug, or an integer value greater than 0 | +| `logging.operator.level` | string | `"info"` | Logging level for KEDA http-add-on operator. allowed values: `debug`, `info`, `error`, or an integer value greater than 0, specified as string | +| `logging.operator.timeEncoding` | string | `"rfc3339"` | Logging time encoding for KEDA http-add-on operator. allowed values are `epoch`, `millis`, `nano`, `iso8601`, `rfc3339` or `rfc3339nano` | +| `logging.scaler.format` | string | `"console"` | Logging format for KEDA http-add-on Scaler. allowed values: `json` or `console` | +| `logging.scaler.level` | string | `"info"` | Logging level for KEDA http-add-on Scaler. allowed values: `debug`, `info`, `error`, or an integer value greater than 0, specified as string | +| `logging.scaler.timeEncoding` | string | `"rfc3339"` | Logging time encoding for KEDA http-add-on Scaler. 
allowed values are `epoch`, `millis`, `nano`, `iso8601`, `rfc3339` or `rfc3339nano` | +| `podSecurityContext` | object | [See below](#KEDA-is-secure-by-default) | [Pod security context] for all pods | +| `rbac.aggregateToDefaultRoles` | bool | `false` | Install aggregate roles for edit and view | +| `securityContext` | object | [See below](#KEDA-is-secure-by-default) | [Security context] for all containers | + +### Operator + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `operator.affinity` | object | `{}` | Affinity for pod scheduling ([docs](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/)) | +| `operator.imagePullSecrets` | list | `[]` | The image pull secrets for the operator component | +| `operator.kubeRbacProxy.resources.limits` | object | `{"cpu":"300m","memory":"200Mi"}` | The CPU/memory resource limit for the operator component's kube rbac proxy | +| `operator.kubeRbacProxy.resources.requests` | object | `{"cpu":"10m","memory":"20Mi"}` | The CPU/memory resource request for the operator component's kube rbac proxy | +| `operator.nodeSelector` | object | `{}` | Node selector for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)) | +| `operator.port` | int | `8443` | The port for the operator main server to run on | +| `operator.pullPolicy` | string | `"Always"` | The image pull policy for the operator component | +| `operator.resources.limits` | object | `{"cpu":0.5,"memory":"64Mi"}` | The CPU/memory resource limit for the operator component | +| `operator.resources.requests` | object | `{"cpu":"250m","memory":"20Mi"}` | The CPU/memory resource request for the operator component | +| `operator.tolerations` | list | `[]` | Tolerations for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)) | +| `operator.watchNamespace` | string | `""` | The namespace to watch for 
new `HTTPScaledObject`s. Leave this blank (i.e. `""`) to tell the operator to watch all namespaces. | + +### Scaler + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `scaler.affinity` | object | `{}` | Affinity for pod scheduling ([docs](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/)) | +| `scaler.grpcPort` | int | `9090` | The port for the scaler's gRPC server. This is the server that KEDA will send scaling requests to. | +| `scaler.imagePullSecrets` | list | `[]` | The image pull secrets for the scaler component | +| `scaler.nodeSelector` | object | `{}` | Node selector for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)) | +| `scaler.pendingRequestsInterceptor` | int | `200` | The number of "target requests" that the external scaler will report to KEDA for the interceptor's scaling metrics. See the [KEDA external scaler documentation](https://keda.sh/docs/2.4/concepts/external-scalers/) for details on target requests. 
| +| `scaler.pullPolicy` | string | `"Always"` | The image pull policy for the scaler component | +| `scaler.replicas` | int | `3` | Number of replicas | +| `scaler.resources.limits.cpu` | float | `0.5` | | +| `scaler.resources.limits.memory` | string | `"64Mi"` | | +| `scaler.resources.requests.cpu` | string | `"250m"` | | +| `scaler.resources.requests.memory` | string | `"20Mi"` | | +| `scaler.service` | string | `"external-scaler"` | The name of the Kubernetes `Service` for the scaler component | +| `scaler.streamInterval` | int | `200` | Interval in ms for communicating IsActive to KEDA | +| `scaler.tolerations` | list | `[]` | Tolerations for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)) | + +### Interceptor + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `interceptor.admin.port` | int | `9090` | The port for the interceptor's admin server to run on | +| `interceptor.admin.service` | string | `"interceptor-admin"` | The name of the Kubernetes `Service` for the interceptor's admin service | +| `interceptor.affinity` | object | `{}` | Affinity for pod scheduling ([docs](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/)) | +| `interceptor.endpointsCachePollingIntervalMS` | int | `250` | How often (in milliseconds) the interceptor does a full refresh of its endpoints cache. The interceptor will also use Kubernetes events to stay up-to-date with the endpoints cache changes. This duration is the maximum time it will take to see changes to the endpoints. | +| `interceptor.expectContinueTimeout` | string | `"1s"` | Special handling for responses with "Expect: 100-continue" response headers. 
see https://pkg.go.dev/net/http#Transport under the 'ExpectContinueTimeout' field for more details | +| `interceptor.forceHTTP2` | bool | `false` | Whether or not the interceptor should force requests to use HTTP/2 | +| `interceptor.idleConnTimeout` | string | `"90s"` | The timeout after which any idle connection is closed and removed from the interceptor's in-memory connection pool. | +| `interceptor.imagePullSecrets` | list | `[]` | The image pull secrets for the interceptor component | +| `interceptor.keepAlive` | string | `"1s"` | The interceptor's connection keep alive timeout | +| `interceptor.maxIdleConns` | int | `100` | The maximum number of idle connections allowed in the interceptor's in-memory connection pool. Set to 0 to indicate no limit | +| `interceptor.nodeSelector` | object | `{}` | Node selector for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)) | +| `interceptor.proxy.port` | int | `8080` | The port on which the interceptor's proxy service will listen for live HTTP traffic | +| `interceptor.proxy.service` | string | `"interceptor-proxy"` | The name of the Kubernetes `Service` for the interceptor's proxy service. This is the service that accepts live HTTP traffic. 
| +| `interceptor.pullPolicy` | string | `"Always"` | The image pull policy for the interceptor component | +| `interceptor.replicas.max` | int | `50` | The maximum number of interceptor replicas that should ever be running | +| `interceptor.replicas.min` | int | `3` | The minimum number of interceptor replicas that should ever be running | +| `interceptor.replicas.waitTimeout` | string | `"20s"` | The maximum time the interceptor should wait for an HTTP request to reach a backend before it is considered a failure | +| `interceptor.resources.limits` | object | `{"cpu":0.5,"memory":"64Mi"}` | The CPU/memory resource limit for the operator component | +| `interceptor.resources.requests` | object | `{"cpu":"250m","memory":"20Mi"}` | The CPU/memory resource request for the operator component | +| `interceptor.responseHeaderTimeout` | string | `"500ms"` | How long the interceptor will wait between forwarding a request to a backend and receiving response headers back before failing the request | +| `interceptor.scaledObject.pollingInterval` | int | `1` | The interval (in milliseconds) that KEDA should poll the external scaler to fetch scaling metrics about the interceptor | +| `interceptor.tcpConnectTimeout` | string | `"500ms"` | How long the interceptor waits to establish TCP connections with backends before failing a request. | +| `interceptor.tlsHandshakeTimeout` | string | `"10s"` | The maximum amount of time the interceptor will wait for a TLS handshake. Set to zero to indicate no timeout. | +| `interceptor.tolerations` | list | `[]` | Tolerations for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)) | + +Specify each parameter using the `--set key=value[,key=value]` argument to +`helm install`. 
For example: + +```console +$ helm install http-add-on kedacore/keda-add-ons-http --namespace keda \ + --set version= +``` + +Alternatively, a YAML file that specifies the values for the above parameters can +be provided while installing the chart. For example, + +```console +helm install http-add-on kedacore/keda-add-ons-http --namespace keda -f values.yaml +``` + +## KEDA is secure by default + +Our default configuration strives to be as secure as possible. Because of that, KEDA will run as non-root and be secure-by-default. You can define global securityContext for all components or switch to granular mode and define securityContext for operator, kuberbacproxy, scaler, and interceptor: +```yaml +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + # runAsUser: 1000 + # runAsGroup: 1000 + # operator: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + # kuberbacproxy: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + # scaler: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + # interceptor: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault +podSecurityContext: + fsGroup: 1000 + supplementalGroups: + - 1000 + # operator: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 + # scaler: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 + # interceptor: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 +``` + +---------------------------------------------- +Autogenerated from chart metadata 
using [helm-docs](https://github.com/norwoodj/helm-docs) \ No newline at end of file diff --git a/addons/keda-http-add-on/README.md.gotmpl b/addons/keda-http-add-on/README.md.gotmpl new file mode 100644 index 000000000..1ebdceafc --- /dev/null +++ b/addons/keda-http-add-on/README.md.gotmpl @@ -0,0 +1,205 @@ +

+ +

Kubernetes-based Event Driven Autoscaling - HTTP Add-On

+

+ +The KEDA HTTP Add On allows Kubernetes users to automatically scale their HTTP servers up and down (including to/from zero) based on incoming HTTP traffic. Please see our [use cases document](./docs/use_cases.md) to learn more about how and why you would use this project. + +| 🚧 **Alpha - Not for production** 🚧| +|---------------------------------------------| +| ⚠ The HTTP add-on is in [experimental stage](https://github.com/kedacore/keda/issues/538) and not ready for production.

It is provided as-is without support. + +>This codebase moves very quickly. We can't currently guarantee that any part of it will work. Neither the complete feature set nor known issues may be fully documented. Similarly, issues filed against this project may not be responded to quickly or at all. **We will release and announce a beta release of this project**, and after we do that, we will document and respond to issues properly. + +## Walkthrough + +Although this is an **alpha release** project right now, we have prepared a walkthrough document that with instructions on getting started for basic usage. + +See that document at [docs/walkthrough.md](https://github.com/kedacore/http-add-on/tree/main/docs/walkthrough.md) + +## Design + +The HTTP add-on is composed of multiple mostly independent components. This design was chosen to allow for highly +customizable installations while allowing us to ship reasonable defaults. + +- We have written a complete design document. Please see it at [docs/design.md](https://github.com/kedacore/http-add-on/tree/main/docs/design.md). +- For more context on the design, please see our [scope document](https://github.com/kedacore/http-add-on/tree/main/docs/scope.md). +- If you have further questions about the project, please see our [FAQ document](https://github.com/kedacore/http-add-on/tree/main/docs/faq.md). + +## Installation + +Please see the [complete installation instructions](https://github.com/kedacore/http-add-on/tree/main/docs/install.md). + +## Contributing + +Please see the [contributing documentation for all instructions](https://github.com/kedacore/http-add-on/tree/main/docs/contributing.md). + +--- +We are a Cloud Native Computing Foundation (CNCF) graduated project. +

+ +--- + +## TL;DR + +```console +helm repo add kedacore https://kedacore.github.io/charts +helm repo update + +helm install http-add-on kedacore/keda-add-ons-http --create-namespace --namespace keda --version {{ template "chart.appVersion" . }} +``` + +## Introduction + +This chart bootstraps KEDA HTTP Add-on infrastructure on a Kubernetes cluster using the Helm package manager. + +As part of that, it will install all the required Custom Resource Definitions (CRD). + +## Installing the Chart + +To install the chart with the release name `http-add-on`, please read the [install instructions on the official repository to get started](https://github.com/kedacore/http-add-on/tree/main/docs/install.md): + +```console +$ helm install http-add-on kedacore/keda-add-ons-http --namespace keda +``` + +> **Important:** This chart **needs** KEDA installed in your cluster to work properly. + +## Uninstalling the Chart + +To uninstall/delete the `http-add-on` Helm chart: + +```console +helm uninstall http-add-on +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the HTTP Add-On chart and +their default values. 
+ +### General parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +{{- range .Values }} + {{- if not (or (hasPrefix "operator" .Key) (hasPrefix "scaler" .Key) (hasPrefix "interceptor" .Key) ) }} +| `{{ .Key }}` | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Operator + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "operator" .Key }} +| `{{ .Key }}` | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Scaler + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "scaler" .Key }} +| `{{ .Key }}` | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Interceptor + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "interceptor" .Key }} +| `{{ .Key }}` | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +Specify each parameter using the `--set key=value[,key=value]` argument to +`helm install`. For example: + +```console +$ helm install http-add-on kedacore/keda-add-ons-http --namespace keda \ + --set version= +``` + +Alternatively, a YAML file that specifies the values for the above parameters can +be provided while installing the chart. 
For example, + +```console +helm install http-add-on kedacore/keda-add-ons-http --namespace keda -f values.yaml +``` + +## KEDA is secure by default + +Our default configuration strives to be as secure as possible. Because of that, KEDA will run as non-root and be secure-by-default. You can define global securityContext for all components or switch to granular mode and define securityContext for operator, kuberbacproxy, scaler, and interceptor: +```yaml +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + # runAsUser: 1000 + # runAsGroup: 1000 + # operator: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + # kuberbacproxy: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + # scaler: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + # interceptor: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault +podSecurityContext: + fsGroup: 1000 + supplementalGroups: + - 1000 + # operator: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 + # scaler: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 + # interceptor: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 +``` + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs](https://github.com/norwoodj/helm-docs) \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/_helpers.tpl b/addons/keda-http-add-on/templates/_helpers.tpl new file mode 100644 index 000000000..1889c31b3 --- 
/dev/null +++ b/addons/keda-http-add-on/templates/_helpers.tpl @@ -0,0 +1,31 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "keda-http-add-on.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate match labels +IMPORTANT: Any change of these labels will block +future upgrades +*/}} +{{- define "keda-http-add-on.matchLabels" }} +app.kubernetes.io/part-of: {{ .Chart.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Generate basic labels +*/}} +{{- define "keda-http-add-on.labels" }} +{{- include "keda-http-add-on.matchLabels" . }} +app.kubernetes.io/version: {{ .Values.images.tag | default .Chart.AppVersion }} +helm.sh/chart: {{ include "keda-http-add-on.chart" . }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/crd.yaml b/addons/keda-http-add-on/templates/crd.yaml new file mode 100644 index 000000000..9c64f00cf --- /dev/null +++ b/addons/keda-http-add-on/templates/crd.yaml @@ -0,0 +1,213 @@ +{{ if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: httpscaledobjects.http.keda.sh +spec: + group: http.keda.sh + names: + kind: HTTPScaledObject + listKind: HTTPScaledObjectList + plural: httpscaledobjects + shortNames: + - httpso + singular: httpscaledobject + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.targetWorkload + name: TargetWorkload + type: string + - jsonPath: .status.targetService + name: TargetService + type: string + - jsonPath: .spec.replicas.min + name: MinReplicas + type: integer + - jsonPath: .spec.replicas.max + name: MaxReplicas 
+ type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.conditions[?(@.type=="HTTPScaledObjectIsReady")].status + name: Active + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: HTTPScaledObject is the Schema for the httpscaledobjects API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: HTTPScaledObjectSpec defines the desired state of HTTPScaledObject + properties: + hosts: + description: |- + The hosts to route. All requests which the "Host" header + matches any .spec.hosts (and the Request Target matches any + .spec.pathPrefixes) will be routed to the Service and Port specified in + the scaleTargetRef. + items: + type: string + type: array + pathPrefixes: + description: |- + The paths to route. All requests which the Request Target matches any + .spec.pathPrefixes (and the "Host" header matches any .spec.hosts) + will be routed to the Service and Port specified in + the scaleTargetRef. 
+ items: + type: string + type: array + replicas: + description: (optional) Replica information + properties: + max: + description: Maximum amount of replicas to have in the deployment + (Default 100) + format: int32 + type: integer + min: + description: Minimum amount of replicas to have in the deployment + (Default 0) + format: int32 + type: integer + type: object + scaleTargetRef: + description: The name of the deployment to route HTTP requests to + (and to autoscale). + properties: + apiVersion: + type: string + deployment: + description: 'Deprecated: The name of the deployment to scale + according to HTTP traffic' + type: string + kind: + type: string + name: + type: string + port: + description: The port to route to + format: int32 + type: integer + service: + description: The name of the service to route to + type: string + required: + - port + - service + type: object + scaledownPeriod: + description: (optional) Cooldown period value + format: int32 + type: integer + scalingMetric: + description: (optional) Configuration for the metric used for scaling + properties: + concurrency: + description: Scaling based on concurrent requests for a given + target + properties: + targetValue: + default: 100 + description: Target value for rate scaling + type: integer + type: object + requestRate: + description: Scaling based the average rate during an specific + time window for a given target + properties: + granularity: + default: 1s + description: Time granularity for rate calculation + type: string + targetValue: + default: 100 + description: Target value for rate scaling + type: integer + window: + default: 1m + description: Time window for rate calculation + type: string + type: object + type: object + targetPendingRequests: + description: (optional) DEPRECATED (use SscalingMetric instead) Target + metric value + format: int32 + type: integer + required: + - scaleTargetRef + type: object + status: + description: HTTPScaledObjectStatus defines the observed state of 
HTTPScaledObject + properties: + conditions: + description: Conditions of the operator + items: + description: HTTPScaledObjectCondition stores the condition state + properties: + message: + description: Message indicating details about the transition. + type: string + reason: + description: Reason for the condition's last transition. + enum: + - ErrorCreatingAppScaledObject + - AppScaledObjectCreated + - TerminatingResources + - AppScaledObjectTerminated + - AppScaledObjectTerminationError + - PendingCreation + - HTTPScaledObjectIsReady + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + timestamp: + description: Timestamp of the condition + type: string + type: + description: Type of condition + enum: + - Ready + type: string + required: + - status + - type + type: object + type: array + targetService: + description: TargetService reflects details about the scaled service. + type: string + targetWorkload: + description: TargetWorkload reflects details about the scaled workload. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{ end }} + \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/interceptor/deployment.yaml b/addons/keda-http-add-on/templates/interceptor/deployment.yaml new file mode 100644 index 000000000..630b12793 --- /dev/null +++ b/addons/keda-http-add-on/templates/interceptor/deployment.yaml @@ -0,0 +1,100 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: interceptor + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-interceptor + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app.kubernetes.io/component: interceptor + {{- include "keda-http-add-on.matchLabels" . | indent 6 }} + template: + metadata: + labels: + app.kubernetes.io/component: interceptor + {{- include "keda-http-add-on.labels" . 
| indent 8 }} + spec: + imagePullSecrets: + {{- toYaml .Values.interceptor.imagePullSecrets | nindent 8 }} + serviceAccountName: {{ .Chart.Name }}-interceptor + {{- if .Values.podSecurityContext.interceptor }} + securityContext: + {{- toYaml .Values.podSecurityContext.interceptor | nindent 8 }} + {{- else }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - args: + - "--zap-log-level={{ .Values.logging.interceptor.level }}" + - "--zap-encoder={{ .Values.logging.interceptor.format }}" + - "--zap-time-encoding={{ .Values.logging.interceptor.timeEncoding }}" + image: "{{ .Values.images.interceptor }}:{{ .Values.images.tag | default .Chart.AppVersion }}" + imagePullPolicy: '{{ .Values.interceptor.pullPolicy | default "Always" }}' + name: "{{ .Chart.Name }}-interceptor" + env: + - name: KEDA_HTTP_CURRENT_NAMESPACE + value: "{{ .Release.Namespace }}" + - name: KEDA_HTTP_PROXY_PORT + value: "{{ .Values.interceptor.proxy.port }}" + - name: KEDA_HTTP_ADMIN_PORT + value: "{{ .Values.interceptor.admin.port }}" + - name: KEDA_HTTP_CONNECT_TIMEOUT + value: "{{ .Values.interceptor.tcpConnectTimeout }}" + - name: KEDA_HTTP_KEEP_ALIVE + value: "{{ .Values.interceptor.keepAlive }}" + - name: KEDA_RESPONSE_HEADER_TIMEOUT + value: "{{ .Values.interceptor.responseHeaderTimeout }}" + - name: KEDA_CONDITION_WAIT_TIMEOUT + value: "{{ .Values.interceptor.replicas.waitTimeout }}" + - name: KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS + value: "{{ .Values.interceptor.endpointsCachePollingIntervalMS }}" + - name: KEDA_HTTP_FORCE_HTTP2 + value: "{{ .Values.interceptor.forceHTTP2 }}" + - name: KEDA_HTTP_MAX_IDLE_CONNS + value: "{{ .Values.interceptor.maxIdleConns }}" + - name: KEDA_HTTP_IDLE_CONN_TIMEOUT + value: "{{ .Values.interceptor.idleConnTimeout }}" + - name: KEDA_HTTP_TLS_HANDSHAKE_TIMEOUT + value: "{{ .Values.interceptor.tlsHandshakeTimeout }}" + - name: KEDA_HTTP_EXPECT_CONTINUE_TIMEOUT + value: "{{ 
.Values.interceptor.expectContinueTimeout }}" + ports: + - containerPort: {{ .Values.interceptor.admin.port }} + name: admin + - containerPort: {{ .Values.interceptor.proxy.port }} + name: proxy + livenessProbe: + httpGet: + path: /livez + port: proxy + readinessProbe: + httpGet: + path: /readyz + port: proxy + resources: + {{- toYaml .Values.interceptor.resources | nindent 10 }} + {{- if .Values.securityContext.interceptor }} + securityContext: + {{- toYaml .Values.securityContext.interceptor | nindent 10 }} + {{- else }} + securityContext: + {{- toYaml .Values.securityContext | nindent 10 }} + {{- end }} + terminationGracePeriodSeconds: 10 + nodeSelector: + kubernetes.io/os: linux + {{- with .Values.interceptor.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.interceptor.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.interceptor.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/interceptor/rbac.yaml b/addons/keda-http-add-on/templates/interceptor/rbac.yaml new file mode 100644 index 000000000..c818165b7 --- /dev/null +++ b/addons/keda-http-add-on/templates/interceptor/rbac.yaml @@ -0,0 +1,43 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "keda-http-add-on.labels" . 
| indent 4 }} + name: {{ .Chart.Name }}-interceptor +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - http.keda.sh + resources: + - httpscaledobjects + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }} + keda.sh/addon: {{ .Chart.Name }} + app: {{ .Chart.Name }} + name: {{ .Chart.Name }}-interceptor + app.kubernetes.io/name: {{ .Chart.Name }}-interceptor + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-interceptor +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Chart.Name }}-interceptor +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }}-interceptor + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/interceptor/scaledobject.yaml b/addons/keda-http-add-on/templates/interceptor/scaledobject.yaml new file mode 100644 index 000000000..49708d74b --- /dev/null +++ b/addons/keda-http-add-on/templates/interceptor/scaledobject.yaml @@ -0,0 +1,20 @@ +# this is the ScaledObject that tells KEDA to scale the interceptor fleet +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: "{{ .Chart.Name }}-interceptor" + namespace: {{ .Release.Namespace }} + labels: + {{- include "keda-http-add-on.labels" . 
| indent 4 }} +spec: + minReplicaCount: {{ .Values.interceptor.replicas.min }} + maxReplicaCount: {{ .Values.interceptor.replicas.max }} + pollingInterval: {{ .Values.interceptor.scaledObject.pollingInterval }} + scaleTargetRef: + name: "{{ .Chart.Name }}-interceptor" + kind: Deployment + triggers: + - type: external + metadata: + scalerAddress: "{{ .Chart.Name }}-{{ .Values.scaler.service }}.{{ .Release.Namespace }}:{{ default 9090 .Values.scaler.grpcPort }}" + interceptorTargetPendingRequests: "{{ default 200 .Values.scaler.pendingRequestsInterceptor }}" \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/interceptor/service-admin.yaml b/addons/keda-http-add-on/templates/interceptor/service-admin.yaml new file mode 100644 index 000000000..8d80f7c1b --- /dev/null +++ b/addons/keda-http-add-on/templates/interceptor/service-admin.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: interceptor-admin + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: "{{ .Chart.Name }}-{{ .Values.interceptor.admin.service }}" + namespace: {{ .Release.Namespace }} +spec: + ports: + - name: admin + port: {{ default 9091 .Values.interceptor.admin.port }} + targetPort: admin + selector: + app.kubernetes.io/component: interceptor + {{- include "keda-http-add-on.matchLabels" . | indent 4 }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/interceptor/service-proxy.yaml b/addons/keda-http-add-on/templates/interceptor/service-proxy.yaml new file mode 100644 index 000000000..1761f4d5e --- /dev/null +++ b/addons/keda-http-add-on/templates/interceptor/service-proxy.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: interceptor-proxy + {{- include "keda-http-add-on.labels" . 
| indent 4 }} + name: "{{ .Chart.Name }}-{{ .Values.interceptor.proxy.service }}" + namespace: {{ .Release.Namespace }} +spec: + ports: + - name: proxy + port: {{ default 9091 .Values.interceptor.proxy.port }} + targetPort: proxy + selector: + app.kubernetes.io/component: interceptor + {{- include "keda-http-add-on.matchLabels" . | indent 4 }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/interceptor/serviceaccount.yaml b/addons/keda-http-add-on/templates/interceptor/serviceaccount.yaml new file mode 100644 index 000000000..b2d013889 --- /dev/null +++ b/addons/keda-http-add-on/templates/interceptor/serviceaccount.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: interceptor + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-interceptor + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/operator/deployment.yaml b/addons/keda-http-add-on/templates/operator/deployment.yaml new file mode 100644 index 000000000..ef024ba6e --- /dev/null +++ b/addons/keda-http-add-on/templates/operator/deployment.yaml @@ -0,0 +1,101 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: operator + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-controller-manager + namespace: {{ .Release.Namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: operator + {{- include "keda-http-add-on.matchLabels" . | indent 6 }} + template: + metadata: + labels: + app.kubernetes.io/component: operator + {{- include "keda-http-add-on.labels" . 
| indent 8 }} + spec: + imagePullSecrets: + {{- toYaml .Values.operator.imagePullSecrets | nindent 8 }} + serviceAccountName: {{ .Chart.Name }} + {{- if .Values.podSecurityContext.operator }} + securityContext: + {{- toYaml .Values.podSecurityContext.operator | nindent 8 }} + {{- else }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - args: + - --secure-listen-address=0.0.0.0:{{ .Values.operator.port | default 8443 }} + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v={{ .Values.logging.operator.kubeRbacProxy.level }} + image: "{{ .Values.images.kubeRbacProxy.name }}:{{ .Values.images.kubeRbacProxy.tag }}" + name: kube-rbac-proxy + resources: + {{- toYaml .Values.operator.kubeRbacProxy.resources | nindent 10 }} + {{- if .Values.securityContext.kuberbacproxy }} + securityContext: + {{- toYaml .Values.securityContext.kuberbacproxy | nindent 10 }} + {{- else }} + securityContext: + {{- toYaml .Values.securityContext | nindent 10 }} + {{- end }} + - args: + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + - --zap-log-level={{ .Values.logging.operator.level }} + - --zap-time-encoding={{ .Values.logging.operator.timeEncoding }} + - --zap-encoder={{ .Values.logging.operator.format }} + image: "{{ .Values.images.operator }}:{{ .Values.images.tag | default .Chart.AppVersion }}" + imagePullPolicy: '{{ .Values.operator.pullPolicy | default "Always" }}' + name: "{{ .Chart.Name }}-operator" + env: + - name: KEDAHTTP_OPERATOR_EXTERNAL_SCALER_SERVICE + value: "{{ .Chart.Name }}-{{ .Values.scaler.service }}" + - name: KEDAHTTP_OPERATOR_EXTERNAL_SCALER_PORT + value: "{{ default 9090 .Values.scaler.grpcPort }}" + - name: KEDA_HTTP_OPERATOR_NAMESPACE + value: "{{ .Release.Namespace }}" + - name: KEDA_HTTP_OPERATOR_WATCH_NAMESPACE + value: "{{ .Values.operator.watchNamespace }}" + ports: + - name: metrics + containerPort: 8080 + - name: probes + containerPort: 8081 + livenessProbe: + httpGet: + 
path: /healthz + port: probes + readinessProbe: + httpGet: + path: /readyz + port: probes + resources: + {{- toYaml .Values.operator.resources | nindent 10 }} + {{- if .Values.securityContext.operator }} + securityContext: + {{- toYaml .Values.securityContext.operator | nindent 10 }} + {{- else }} + securityContext: + {{- toYaml .Values.securityContext | nindent 10 }} + {{- end }} + terminationGracePeriodSeconds: 10 + nodeSelector: + kubernetes.io/os: linux + {{- with .Values.operator.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.operator.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.operator.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/operator/rbac.yaml b/addons/keda-http-add-on/templates/operator/rbac.yaml new file mode 100644 index 000000000..519e33ac8 --- /dev/null +++ b/addons/keda-http-add-on/templates/operator/rbac.yaml @@ -0,0 +1,177 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-role + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }} + keda.sh/addon: {{ .Chart.Name }} + app: {{ .Chart.Name }} + name: {{ .Chart.Name }}-role + app.kubernetes.io/name: {{ .Chart.Name }}-role + {{- include "keda-http-add-on.labels" . 
| indent 4 }} + name: {{ .Chart.Name }}-role +rules: +- apiGroups: + - http.keda.sh + resources: + - httpscaledobjects + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - http.keda.sh + resources: + - httpscaledobjects/finalizers + verbs: + - update +- apiGroups: + - http.keda.sh + resources: + - httpscaledobjects/status + verbs: + - get + - patch + - update +- apiGroups: + - keda.sh + resources: + - scaledobjects + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }} + keda.sh/addon: {{ .Chart.Name }} + app: {{ .Chart.Name }} + name: {{ .Chart.Name }}-proxy-role + app.kubernetes.io/name: {{ .Chart.Name }}-proxy-role + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-proxy-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }} + keda.sh/addon: {{ .Chart.Name }} + app: {{ .Chart.Name }} + name: {{ .Chart.Name }}-metrics-reader + app.kubernetes.io/name: {{ .Chart.Name }}-metrics-reader + {{- include "keda-http-add-on.labels" . 
| indent 4 }} + name: {{ .Chart.Name }}-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }} + keda.sh/addon: {{ .Chart.Name }} + app: {{ .Chart.Name }} + name: {{ .Chart.Name }}-role-binding + app.kubernetes.io/name: {{ .Chart.Name }}-role-binding + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-role-rolebinding + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Chart.Name }}-role +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }} + keda.sh/addon: {{ .Chart.Name }} + app: {{ .Chart.Name }} + name: {{ .Chart.Name }}-rolebinding + app.kubernetes.io/name: {{ .Chart.Name }}-rolebinding + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Chart.Name }}-role +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }} + keda.sh/addon: {{ .Chart.Name }} + app: {{ .Chart.Name }} + name: {{ .Chart.Name }}-rolebinding + app.kubernetes.io/name: {{ .Chart.Name }}-rolebinding + {{- include "keda-http-add-on.labels" . 
| indent 4 }} + name: {{ .Chart.Name }}-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Chart.Name }}-proxy-role +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/operator/service.yaml b/addons/keda-http-add-on/templates/operator/service.yaml new file mode 100644 index 000000000..e0dd88b68 --- /dev/null +++ b/addons/keda-http-add-on/templates/operator/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: operator + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-controller-manager-metrics-service + namespace: {{ .Release.Namespace }} +spec: + ports: + - name: metrics + port: {{ default 8443 .Values.operator.port }} + targetPort: metrics + selector: + app.kubernetes.io/component: operator + {{- include "keda-http-add-on.matchLabels" . | indent 4 }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/operator/serviceaccount.yaml b/addons/keda-http-add-on/templates/operator/serviceaccount.yaml new file mode 100644 index 000000000..d33ec74b1 --- /dev/null +++ b/addons/keda-http-add-on/templates/operator/serviceaccount.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: operator + {{- include "keda-http-add-on.labels" . 
| indent 4 }} + name: {{ .Chart.Name }} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/rbac-aggregateclusterroles.yaml b/addons/keda-http-add-on/templates/rbac-aggregateclusterroles.yaml new file mode 100644 index 000000000..d366e8a39 --- /dev/null +++ b/addons/keda-http-add-on/templates/rbac-aggregateclusterroles.yaml @@ -0,0 +1,43 @@ +{{- if .Values.rbac.aggregateToDefaultRoles }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Chart.Name }}-edit + labels: + {{- include "keda-http-add-on.labels" . | indent 4 }} + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: +- apiGroups: + - http.keda.sh + resources: + - httpscaledobjects + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Chart.Name }}-view + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + {{- include "keda-http-add-on.labels" . | indent 4 }} +rules: +- apiGroups: + - http.keda.sh + resources: + - httpscaledobjects + verbs: + - get + - list + - watch +{{- end -}} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/scaler/deployment.yaml b/addons/keda-http-add-on/templates/scaler/deployment.yaml new file mode 100644 index 000000000..37c944d1b --- /dev/null +++ b/addons/keda-http-add-on/templates/scaler/deployment.yaml @@ -0,0 +1,93 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: scaler + {{- include "keda-http-add-on.labels" . 
| indent 4 }} + name: {{ .Chart.Name }}-external-scaler + namespace: {{ .Release.Namespace }} +spec: + replicas: {{ .Values.scaler.replicas }} + selector: + matchLabels: + app.kubernetes.io/component: scaler + {{- include "keda-http-add-on.matchLabels" . | indent 6 }} + template: + metadata: + labels: + app.kubernetes.io/component: scaler + {{- include "keda-http-add-on.labels" . | indent 8 }} + spec: + imagePullSecrets: + {{- toYaml .Values.scaler.imagePullSecrets | nindent 8 }} + serviceAccountName: {{ .Chart.Name }}-external-scaler + {{- if .Values.podSecurityContext.scaler }} + securityContext: + {{- toYaml .Values.podSecurityContext.scaler | nindent 8 }} + {{- else }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - args: + - "--zap-log-level={{ .Values.logging.scaler.level }}" + - "--zap-encoder={{ .Values.logging.scaler.format }}" + - "--zap-time-encoding={{ .Values.logging.scaler.timeEncoding }}" + image: "{{ .Values.images.scaler }}:{{ .Values.images.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.scaler.pullPolicy | default "Always" }} + name: "{{ .Chart.Name }}-external-scaler" + ports: + - containerPort: {{ .Values.scaler.grpcPort }} + name: grpc + env: + - name: KEDA_HTTP_SCALER_TARGET_ADMIN_DEPLOYMENT + value: "{{ .Chart.Name }}-interceptor" + - name: KEDA_HTTP_SCALER_PORT + value: "{{ .Values.scaler.grpcPort }}" + - name: KEDA_HTTP_SCALER_TARGET_ADMIN_NAMESPACE + value: "{{ .Release.Namespace }}" + - name: KEDA_HTTP_SCALER_TARGET_ADMIN_SERVICE + value: "{{ .Chart.Name }}-{{ .Values.interceptor.admin.service }}" + - name: KEDA_HTTP_SCALER_TARGET_ADMIN_PORT + value: "{{ default 9091 .Values.interceptor.admin.port }}" + - name: KEDA_HTTP_SCALER_STREAM_INTERVAL_MS + value: "{{ .Values.scaler.streamInterval }}" + resources: + {{- toYaml .Values.scaler.resources | nindent 10 }} + livenessProbe: + grpc: + port: {{ .Values.scaler.grpcPort }} + service: liveness + timeoutSeconds: 5 + 
periodSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + grpc: + port: {{ .Values.scaler.grpcPort }} + service: readiness + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + {{- if .Values.securityContext.scaler }} + securityContext: + {{- toYaml .Values.securityContext.scaler | nindent 10 }} + {{- else }} + securityContext: + {{- toYaml .Values.securityContext | nindent 10 }} + {{- end }} + terminationGracePeriodSeconds: 10 + nodeSelector: + kubernetes.io/os: linux + {{- with .Values.scaler.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.scaler.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.scaler.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/scaler/rbac.yaml b/addons/keda-http-add-on/templates/scaler/rbac.yaml new file mode 100644 index 000000000..81a61f336 --- /dev/null +++ b/addons/keda-http-add-on/templates/scaler/rbac.yaml @@ -0,0 +1,43 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-external-scaler +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - http.keda.sh + resources: + - httpscaledobjects + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + httpscaledobjects.http.keda.sh/version: {{ .Values.images.tag | default .Chart.AppVersion }} + keda.sh/addon: {{ .Chart.Name }} + app: {{ .Chart.Name }} + name: {{ .Chart.Name }}-external-scaler + app.kubernetes.io/name: {{ .Chart.Name }}-external-scaler + {{- include "keda-http-add-on.labels" . 
| indent 4 }} + name: {{ .Chart.Name }}-external-scaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Chart.Name }}-external-scaler +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }}-external-scaler + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/scaler/service.yaml b/addons/keda-http-add-on/templates/scaler/service.yaml new file mode 100644 index 000000000..5de91f2f9 --- /dev/null +++ b/addons/keda-http-add-on/templates/scaler/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: scaler + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: "{{ .Chart.Name }}-{{ .Values.scaler.service }}" + namespace: {{ .Release.Namespace }} +spec: + ports: + - name: grpc + port: {{ default 9090 .Values.scaler.grpcPort }} + targetPort: grpc + selector: + app.kubernetes.io/component: scaler + {{- include "keda-http-add-on.matchLabels" . | indent 4 }} \ No newline at end of file diff --git a/addons/keda-http-add-on/templates/scaler/serviceaccount.yaml b/addons/keda-http-add-on/templates/scaler/serviceaccount.yaml new file mode 100644 index 000000000..e6617cce2 --- /dev/null +++ b/addons/keda-http-add-on/templates/scaler/serviceaccount.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: scaler + {{- include "keda-http-add-on.labels" . | indent 4 }} + name: {{ .Chart.Name }}-external-scaler + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/addons/keda-http-add-on/values.yaml b/addons/keda-http-add-on/values.yaml new file mode 100644 index 000000000..c0a4e7bbd --- /dev/null +++ b/addons/keda-http-add-on/values.yaml @@ -0,0 +1,274 @@ +# -- Additional labels to be applied to installed resources. Note that not all resources will receive these labels. 
+additionalLabels: {} + +crds: + # -- Whether to install the `HTTPScaledObject` [`CustomResourceDefinition`](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) + install: true + +logging: + operator: + # -- Logging level for KEDA http-add-on operator. + # allowed values: `debug`, `info`, `error`, or an integer value greater than 0, specified as string + level: info + # -- Logging format for KEDA http-add-on operator. + # allowed values: `json` or `console` + format: console + # -- Logging time encoding for KEDA http-add-on operator. + # allowed values are `epoch`, `millis`, `nano`, `iso8601`, `rfc3339` or `rfc3339nano` + timeEncoding: rfc3339 + + kubeRbacProxy: + # -- Logging level for KEDA http-add-on operator rbac proxy + # allowed values: `0` for info, `4` for debug, or an integer value greater than 0 + level: 10 + scaler: + # -- Logging level for KEDA http-add-on Scaler. + # allowed values: `debug`, `info`, `error`, or an integer value greater than 0, specified as string + level: info + # -- Logging format for KEDA http-add-on Scaler. + # allowed values: `json` or `console` + format: console + # -- Logging time encoding for KEDA http-add-on Scaler. + # allowed values are `epoch`, `millis`, `nano`, `iso8601`, `rfc3339` or `rfc3339nano` + timeEncoding: rfc3339 + interceptor: + # -- Logging level for KEDA http-add-on Interceptor. + # allowed values: `debug`, `info`, `error`, or an integer value greater than 0, specified as string + level: info + # -- Logging format for KEDA http-add-on Interceptor. + # allowed values: `json` or `console` + format: console + # -- Logging time encoding for KEDA http-add-on Interceptor. + # allowed values are `epoch`, `millis`, `nano`, `iso8601`, `rfc3339` or `rfc3339nano` + timeEncoding: rfc3339 + +# operator-specific configuration values +operator: + # -- The image pull secrets for the operator component + imagePullSecrets: [] + # -- The namespace to watch for new `HTTPScaledObject`s. 
Leave this blank (i.e. `""`) to tell the operator to watch all namespaces. + watchNamespace: "" + # -- The image pull policy for the operator component + pullPolicy: Always + # operator pod resource limits + resources: + # -- The CPU/memory resource limit for the operator component + limits: + cpu: 0.5 + memory: 64Mi + # -- The CPU/memory resource request for the operator component + requests: + cpu: 250m + memory: 20Mi + # -- The port for the operator main server to run on + port: 8443 + # -- Node selector for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)) + nodeSelector: {} + # -- Tolerations for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)) + tolerations: [] + # -- Affinity for pod scheduling ([docs](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/)) + affinity: {} + + kubeRbacProxy: + resources: + # -- The CPU/memory resource limit for the operator component's kube rbac proxy + limits: + cpu: 300m + memory: 200Mi + # -- The CPU/memory resource request for the operator component's kube rbac proxy + requests: + cpu: 10m + memory: 20Mi + +scaler: + # -- Number of replicas + replicas: 3 + # -- The image pull secrets for the scaler component + imagePullSecrets: [] + # -- The name of the Kubernetes `Service` for the scaler component + service: external-scaler + # -- The image pull policy for the scaler component + pullPolicy: Always + # -- The port for the scaler's gRPC server. This is the server that KEDA will send scaling requests to. + grpcPort: 9090 + # -- The number of "target requests" that the external scaler will report to KEDA for the interceptor's scaling metrics. See the [KEDA external scaler documentation](https://keda.sh/docs/2.4/concepts/external-scalers/) for details on target requests. 
+ pendingRequestsInterceptor: 200 + # -- Interval in ms for communicating IsActive to KEDA + streamInterval: 200 + # -- Node selector for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)) + nodeSelector: {} + # -- Tolerations for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)) + tolerations: [] + # -- Affinity for pod scheduling ([docs](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/)) + affinity: {} + resources: + limits: + cpu: 0.5 + memory: 64Mi + requests: + cpu: 250m + memory: 20Mi + +interceptor: + # -- The image pull secrets for the interceptor component + imagePullSecrets: [] + # -- The image pull policy for the interceptor component + pullPolicy: Always + # configurable values for the interceptor's admin + # service. the admin service is a cluster-internal + # HTTP interface for triggering debugging behavior + admin: + # -- The name of the Kubernetes `Service` for the interceptor's admin service + service: interceptor-admin + # -- The port for the interceptor's admin server to run on + port: 9090 + # configurable values for the interceptor's proxy + # service. the proxy service is the publicly accessible + # HTTP interface that production requests go to + proxy: + # -- The name of the Kubernetes `Service` for the interceptor's proxy service. This is the service that accepts live HTTP traffic. 
+ service: interceptor-proxy + # -- The port on which the interceptor's proxy service will listen for live HTTP traffic + port: 8080 + replicas: + # -- The minimum number of interceptor replicas that should ever be running + min: 3 + # -- The maximum number of interceptor replicas that should ever be running + max: 50 + # -- The maximum time the interceptor should wait for an HTTP request to reach a backend before it is considered a failure + waitTimeout: 20s + + # configuration for the ScaledObject resource for the + # interceptor + scaledObject: + # -- The interval (in milliseconds) that KEDA should poll the external scaler to fetch scaling metrics about the interceptor + pollingInterval: 1 + + # -- How long the interceptor waits to establish TCP connections with backends before failing a request. + tcpConnectTimeout: 500ms + # -- The interceptor's connection keep alive timeout + keepAlive: 1s + # -- How long the interceptor will wait between forwarding a request to a backend and receiving response headers back before failing the request + responseHeaderTimeout: 500ms + # -- How often (in milliseconds) the interceptor does a full refresh of its endpoints cache. The interceptor will also use Kubernetes events to stay up-to-date with the endpoints cache changes. This duration is the maximum time it will take to see changes to the endpoints. + endpointsCachePollingIntervalMS: 250 + # -- Whether or not the interceptor should force requests to use HTTP/2 + forceHTTP2: false + # -- The maximum number of idle connections allowed in the interceptor's in-memory connection pool. Set to 0 to indicate no limit + maxIdleConns: 100 + # -- The timeout after which any idle connection is closed and removed from the interceptor's in-memory connection pool. + idleConnTimeout: 90s + # -- The maximum amount of time the interceptor will wait for a TLS handshake. Set to zero to indicate no timeout. 
+ tlsHandshakeTimeout: 10s + # -- Special handling for responses with "Expect: 100-continue" response headers. see https://pkg.go.dev/net/http#Transport under the 'ExpectContinueTimeout' field for more details + expectContinueTimeout: 1s + # -- Node selector for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)) + nodeSelector: {} + # -- Tolerations for pod scheduling ([docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)) + tolerations: [] + # -- Affinity for pod scheduling ([docs](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/)) + affinity: {} + # interceptor pod resource limits + resources: + # -- The CPU/memory resource limit for the operator component + limits: + cpu: 0.5 + memory: 64Mi + # -- The CPU/memory resource request for the operator component + requests: + cpu: 250m + memory: 20Mi + +# configuration for the images to use for each component +images: + # tag is the image tag to use for all images. + # for example, if the operator image is "myoperator" and + # tag is "mytag", the operator image used will be + # "myoperator:mytag". `latest` is used to indicate the latest + # stable release in the official images, `canary` is + # the build for the latest commit to the `main` branch, + # and you can target any other commit with `sha-` + # -- Image tag for the http add on. This tag is applied to the images listed in `images.operator`, `images.interceptor`, and `images.scaler`. 
Optional, given app version of Helm chart is used by default + tag: "" + # -- Image name for the operator image component + operator: ghcr.io/kedacore/http-add-on-operator + # -- Image name for the interceptor image component + interceptor: ghcr.io/kedacore/http-add-on-interceptor + # -- Image name for the scaler image component + scaler: ghcr.io/kedacore/http-add-on-scaler + # the kube-rbac-proxy image to use + kubeRbacProxy: + # -- Image name for the Kube RBAC Proxy image component + name: gcr.io/kubebuilder/kube-rbac-proxy + # -- Image tag for the Kube RBAC Proxy image component + tag: v0.13.0 + +rbac: + # -- Install aggregate roles for edit and view + aggregateToDefaultRoles: false + +# -- [Security context] for all containers +# @default -- [See below](#KEDA-is-secure-by-default) +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + # runAsUser: 1000 + # runAsGroup: 1000 + # operator: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + # kuberbacproxy: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + # scaler: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + # interceptor: + # capabilities: + # drop: + # - ALL + # allowPrivilegeEscalation: false + # readOnlyRootFilesystem: true + # seccompProfile: + # type: RuntimeDefault + +# -- [Pod security context] for all pods +# @default -- [See below](#KEDA-is-secure-by-default) +podSecurityContext: + fsGroup: 1000 + supplementalGroups: + - 1000 + # operator: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 + # scaler: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 
+ # interceptor: + # runAsNonRoot: true + # runAsUser: 1000 + # runAsGroup: 1000 + # fsGroup: 1000 \ No newline at end of file diff --git a/addons/kube-image-keeper/.helmignore b/addons/kube-image-keeper/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/addons/kube-image-keeper/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/addons/kube-image-keeper/Chart.lock b/addons/kube-image-keeper/Chart.lock new file mode 100644 index 000000000..2e293db45 --- /dev/null +++ b/addons/kube-image-keeper/Chart.lock @@ -0,0 +1,2 @@ +digest: sha256:deb5af1d98c80ea52289c771f4cae41c7ef73fbb231c86f8eda553a9d4d53cc8 +generated: "2024-01-23T16:17:31.822508041+01:00" diff --git a/addons/kube-image-keeper/Chart.yaml b/addons/kube-image-keeper/Chart.yaml new file mode 100644 index 000000000..8abb294f3 --- /dev/null +++ b/addons/kube-image-keeper/Chart.yaml @@ -0,0 +1,29 @@ +apiVersion: v2 +name: kube-image-keeper +description: kuik is a container image caching system for Kubernetes. 
+type: application +annotations: + artifacthub.io/containsSecurityUpdates: 'false' + artifacthub.io/prerelease: 'false' + artifacthub.io/license: MIT + artifacthub.io/changes: '' + artifacthub.io/links: | + - name: Chart Sources + url: https://github.com/enix/kube-image-keeper/tree/main/helm/kube-image-keeper + - name: Helm Repository + url: https://charts.enix.io +version: 1.9.0 +appVersion: 1.9.0 +home: https://github.com/enix/kube-image-keeper +sources: + - https://github.com/enix/kube-image-keeper +maintainers: + - name: Enix + email: contact@enix.fr + url: https://github.com/enixsas + - name: Paul Laffitte + email: paul.laffitte@enix.fr + url: https://github.com/paullaffitte + - name: David Donchez + email: david.donchez@enix.fr + url: https://github.com/donch \ No newline at end of file diff --git a/addons/kube-image-keeper/README.md.gotmpl b/addons/kube-image-keeper/README.md.gotmpl new file mode 100644 index 000000000..91c9598d1 --- /dev/null +++ b/addons/kube-image-keeper/README.md.gotmpl @@ -0,0 +1,362 @@ +# kube-image-keeper (kuik) + +[![Releases](https://github.com/enix/kube-image-keeper/actions/workflows/release.yml/badge.svg?branch=release)](https://github.com/enix/kube-image-keeper/releases) +[![Go report card](https://goreportcard.com/badge/github.com/enix/kube-image-keeper)](https://goreportcard.com/report/github.com/enix/kube-image-keeper) +[![MIT license](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) +[![Brought to you by 
Enix](https://img.shields.io/badge/Brought%20to%20you%20by-ENIX-%23377dff?labelColor=888&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAQAAAC1QeVaAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAAmJLR0QA/4ePzL8AAAAHdElNRQfkBAkQIg/iouK/AAABZ0lEQVQY0yXBPU8TYQDA8f/zcu1RSDltKliD0BKNECYZmpjgIAOLiYtubn4EJxI/AImzg3E1+AGcYDIMJA7lxQQQQRAiSSFG2l457+655x4Gfz8B45zwipWJ8rPCQ0g3+p9Pj+AlHxHjnLHAbvPW2+GmLoBN+9/+vNlfGeU2Auokd8Y+VeYk/zk6O2fP9fcO8hGpN/TUbxpiUhJiEorTgy+6hUlU5N1flK+9oIJHiKNCkb5wMyOFw3V9o+zN69o0Exg6ePh4/GKr6s0H72Tc67YsdXbZ5gENNjmigaXbMj0tzEWrZNtqigva5NxjhFP6Wfw1N1pjqpFaZQ7FAY6An6zxTzHs0BGqY/NQSnxSBD6WkDRTf3O0wG2Ztl/7jaQEnGNxZMdy2yET/B2xfGlDagQE1OgRRvL93UOHqhLnesPKqJ4NxLLn2unJgVka/HBpbiIARlHFq1n/cWlMZMne1ZfyD5M/Aa4BiyGSwP4Jl3UAAAAldEVYdGRhdGU6Y3JlYXRlADIwMjAtMDQtMDlUMTQ6MzQ6MTUrMDI6MDDBq8/nAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDIwLTA0LTA5VDE0OjM0OjE1KzAyOjAwsPZ3WwAAAABJRU5ErkJggg==)](https://enix.io) + +kube-image-keeper (a.k.a. *kuik*, which is pronounced /kwɪk/, like "quick") is a container image caching system for Kubernetes. +It saves the container images used by your pods in its own local registry so that these images remain available if the original becomes unavailable. + +## Upgrading + +### From 1.6.0 o 1.7.0 + +***ACTION REQUIRED*** + +To follow Helm3 best pratices, we moved `cachedimage` and `repository` custom resources definition from the helm templates directory to the dedicated `crds` directory. +This will cause the `cachedimage` CRD to be deleted during the 1.7.0 upgrade. 
+ +We advise you to uninstall your helm release, clean the remaining custom resources by removing their finalizer, then reinstall kuik in 1.7.0 + +You may also recreate the custom resource definition right after the upgrade to 1.7.0 using +``` +kubectl apply -f https://raw.githubusercontent.com/enix/kube-image-keeper/main/helm/kube-image-keeper/crds/cachedimage-crd.yaml +kubectl apply -f https://raw.githubusercontent.com/enix/kube-image-keeper/main/helm/kube-image-keeper/crds/repository-crd.yaml +``` + + +## Why and when is it useful? + +At [Enix](https://enix.io/), we manage production Kubernetes clusters both for our internal use and for various customers; sometimes on premises, sometimes in various clouds, public or private. We regularly run into image availability issues, for instance: + +- the registry is unavailable or slow; +- a critical image was deleted from the registry (by accident or because of a misconfigured retention policy), +- the registry has pull quotas (or other rate-limiting mechanisms) and temporarily won't let us pull more images. + +(The last point is a well-known challenge when pulling lots of images from the Docker Hub, and becomes particularly painful when private Kubernetes nodes access the registry through a single NAT gateway!) + +We needed a solution that would: + +- work across a wide range of Kubernetes versions, container engines, and image registries, +- preserve Kubernetes' out-of-the-box image caching behavior and [image pull policies](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy), +- have fairly minimal requirements, +- and be easy and quick to install. + +We investigated other options, and we didn't find any that would quite fit our requirements, so we wrote kuik instead. + +## Prerequisites + +- A Kubernetes cluster¹ (duh!)
+- Admin permissions² +- cert-manager³ +- Helm⁴ >= 3.2.0 +- CNI plugin with [port-mapper⁵](https://www.cni.dev/plugins/current/meta/portmap/) enabled +- In a production environment, we definitely recommend that you use persistent⁶ storage + +¹A local development cluster like minikube or KinD is fine. +
+²In addition to its own pods, kuik needs to register a MutatingWebhookConfiguration. +
+³kuik uses cert-manager to issue and configure its webhook certificate. You don't need to configure cert-manager in a particular way (you don't even need to create an Issuer or ClusterIssuer). It's alright to just `kubectl apply` the YAML as shown in the [cert-manager installation instructions](https://cert-manager.io/docs/installation/). +
+⁴If you prefer to install with "plain" YAML manifests, we'll tell you how to generate these manifests. +
+⁵Most CNI plugins these days enable port-mapper out of the box, so this shouldn't be an issue, but we're mentioning it just in case. +
+⁶You can use kuik without persistence, but if the pod running the registry gets deleted, you will lose your cached images. They will be automatically pulled again when needed, though. + +## Supported Kubernetes versions + +kuik has been developed for, and tested with, Kubernetes 1.24 to 1.28; but the code doesn't use any deprecated (or new) feature or API, and should work with newer versions as well. + +## How it works + +When a pod is created, kuik's **mutating webhook** rewrites its images on the fly to point to the local caching registry, adding a `localhost:{port}/` prefix (the `port` is 7439 by default, and is configurable). This means that you don't need to modify/rewrite the source registry url of your manifest/helm chart used to deploy your solution, kuik will take care of it. + +On `localhost:{port}`, there is an **image proxy** that serves images from kuik's **caching registry** (when the images have been cached) or directly from the original registry (when the images haven't been cached yet). + +One **controller** watches pods, and when it notices new images, it creates `CachedImage` custom resources for these images. + +Another **controller** watches these `CachedImage` custom resources, and copies images from source registries to kuik's caching registry accordingly. When images come from a private registry, the controller will use the `imagePullSecrets` from the `CachedImage` spec, those are set from the pod that produced the `CachedImage`. 
+ +Here is what our images look like when using kuik: + +```bash +$ kubectl get pods -o custom-columns=NAME:metadata.name,IMAGES:spec.containers[*].image +NAME IMAGES +debugger localhost:7439/registrish.s3.amazonaws.com/alpine +factori-0 localhost:7439/factoriotools/factorio:1.1 +nvidiactk-b5f7m localhost:7439/nvcr.io/nvidia/k8s/container-toolkit:v1.12.0-ubuntu20.04 +sshd-8b8c6cfb6-l2tc9 localhost:7439/ghcr.io/jpetazzo/shpod +web-8667899c97-2v88h localhost:7439/nginx +web-8667899c97-89j2h localhost:7439/nginx +web-8667899c97-fl54b localhost:7439/nginx +``` + +The kuik controllers keep track of how many pods use a given image. When an image isn't used anymore, it is flagged for deletion, and removed one month later. This expiration delay can be configured. You can see kuik's view of your images by looking at the `CachedImages` custom resource: + +```bash +$ kubectl get cachedimages +NAME CACHED EXPIRES AT PODS COUNT AGE +docker.io-dockercoins-hasher-v0.1 true 2023-03-07T10:50:14Z 36m +docker.io-factoriotools-factorio-1.1 true 1 4m1s +docker.io-jpetazzo-shpod-latest true 2023-03-07T10:53:57Z 9m18s +docker.io-library-nginx-latest true 3 36m +ghcr.io-jpetazzo-shpod-latest true 1 36m +nvcr.io-nvidia-k8s-container-toolkit-v1.12.0-ubuntu20.04 true 1 29m +registrish.s3.amazonaws.com-alpine-latest 1 35m +``` + +## Architecture and components + +In kuik's namespace, you will find: + +- a `Deployment` to run kuik's controllers, +- a `DaemonSet` to run kuik's image proxy, +- a `StatefulSet` to run kuik's image cache, a `Deployment` is used instead when this component runs in HA mode. + +The image cache will obviously require a bit of disk space to run (see [Garbage collection and limitations](#garbage-collection-and-limitations) below). Otherwise, kuik's components are fairly lightweight in terms of compute resources. 
This shows CPU and RAM usage with the default setup, featuring two controllers in HA mode: + +```bash +$ kubectl top pods +NAME CPU(cores) MEMORY(bytes) +kube-image-keeper-0 1m 86Mi +kube-image-keeper-controllers-5b5cc9fcc6-bv6cp 1m 16Mi +kube-image-keeper-controllers-5b5cc9fcc6-tjl7t 3m 24Mi +kube-image-keeper-proxy-54lzk 1m 19Mi +``` + +![Architecture](https://raw.githubusercontent.com/enix/kube-image-keeper/main/docs/architecture.jpg) + +### Metrics + +Refer to the [dedicated documentation](https://github.com/enix/kube-image-keeper/blob/main/docs/metrics.md). + +## Installation + +1. Make sure that you have cert-manager installed. If not, check its [installation page](https://cert-manager.io/docs/installation/) (it's fine to use the `kubectl apply` one-liner, and no further configuration is required). +1. Install kuik's Helm chart from our [charts](https://charts.enix.io) repository: + +```bash +helm upgrade --install \ + --create-namespace --namespace kuik-system \ + kube-image-keeper kube-image-keeper \ + --repo https://charts.enix.io/ +``` + +That's it! + +Our container images are available across multiple registries for reliability. You can find them on [Github Container Registry](https://github.com/enix/kube-image-keeper/pkgs/container/kube-image-keeper), [Quay](https://quay.io/repository/enix/kube-image-keeper) and [DockerHub](https://hub.docker.com/r/enix/kube-image-keeper). + +CAUTION: If you use a storage backend that runs in the same cluster as kuik but in a different namespace, be sure to filter the storage backend's pods. Failure to do so may lead to interdependency issues, making it impossible to start both kuik and its storage backend if either encounters an issue. + +{{ template "chart.valuesSection" . 
}} + +## Installation with plain YAML files + +You can use Helm to generate plain YAML files and then deploy these YAML files with `kubectl apply` or whatever you want: + +```bash +helm template --namespace kuik-system \ + kube-image-keeper kube-image-keeper \ + --repo https://charts.enix.io/ \ + > /tmp/kuik.yaml +kubectl create namespace kuik-system +kubectl apply -f /tmp/kuik.yaml --namespace kuik-system +``` + +## Configuration and customization + +If you want to change e.g. the expiration delay, the port number used by the proxy, enable persistence (with a PVC) for the registry cache... You can do that with standard Helm values. + +You can see the full list of parameters (along with their meaning and default values) in the chart's [values.yaml](https://github.com/enix/kube-image-keeper/blob/main/helm/kube-image-keeper/values.yaml) file, or on [kuik's page on the Artifact Hub](https://artifacthub.io/packages/helm/enix/kube-image-keeper). + +For instance, to extend the expiration delay to 3 months (90 days), you can deploy kuik like this: + +```bash +helm upgrade --install \ + --create-namespace --namespace kuik-system \ + kube-image-keeper kube-image-keeper \ + --repo https://charts.enix.io/ \ + --set cachedImagesExpiryDelay=90 +``` + +## Advanced usage + +### Pod filtering + +There are 3 ways to tell kuik which pods it should manage (or, conversely, which ones it should ignore). + +- If a pod has the label `kube-image-keeper.enix.io/image-caching-policy=ignore`, kuik will ignore the pod (it will not rewrite its image references). +- If a pod is in an ignored Namespace, it will also be ignored. Namespaces can be ignored by setting the Helm value `controllers.webhook.ignoredNamespaces` (`kube-system` and the kuik namespace will be ignored whatever the value of this parameter). 
(Note: this feature relies on the [NamespaceDefaultLabelName](https://kubernetes.io/docs/concepts/services-networking/network-policies/#targeting-a-namespace-by-its-name) feature gate to work.) +- Finally, kuik will only work on pods matching a specific selector. By default, the selector is empty, which means "match all the pods". The selector can be set with the Helm value `controllers.webhook.objectSelector.matchExpressions`. + +This logic isn't implemented by the kuik controllers or webhook directly, but through Kubernetes' standard webhook object selectors. In other words, these parameters end up in the `MutatingWebhookConfiguration` template to filter which pods get presented to kuik's webhook. When the webhook rewrites the images for a pod, it adds a label to that pod, and the kuik controllers then rely on that label to know which `CachedImages` resources to create. + +Keep in mind that kuik will ignore pods scheduled into its own namespace or in the `kube-system` namespace as recommended in the kubernetes documentation ([Avoiding operating on the kube-system namespace](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#avoiding-operating-on-the-kube-system-namespace)). + +> It is recommended to exclude the namespace where your webhook is running with a namespaceSelector. +> [...] +> Accidentally mutating or rejecting requests in the kube-system namespace may cause the control plane components to stop functioning or introduce unknown behavior. + +#### Image pull policy + +In the case of a container configured with `imagePullPolicy: Never`, the container will always be filtered out as it makes no sense to cache an image that would never be cached and always read from the disk. 
+ +In the case of a container configured with `imagePullPolicy: Always`, or with the tag `latest`, or with no tag (defaulting to `latest`), by default, the container will be filtered out in order to keep the default behavior of kubernetes, which is to always pull the new version of the image (thus not using the cache of kuik). This can be disabled by setting the value `controllers.webhook.ignorePullPolicyAlways` to `false`. + +### Cache persistence + +Persistence is disabled by default. You can enable it by setting the Helm value `registry.persistence.enabled=true`. This will create a PersistentVolumeClaim with a default size of 20 GiB. You can change that size by setting the value `registry.persistence.size`. Keep in mind that enabling persistence isn't enough to provide high availability of the registry! If you want kuik to be highly available, please refer to the [high availability guide](https://github.com/enix/kube-image-keeper/blob/main/docs/high-availability.md). + +Note that persistence requires your cluster to have some PersistentVolumes. If you don't have PersistentVolumes, kuik's registry Pod will remain `Pending` and your images won't be cached (but they will still be served transparently by kuik's image proxy). + +### Retain policy + +Sometimes, you want images to stay cached even when they are not used anymore (for instance when you run a workload for a fixed amount of time, stop it, and run it again later). You can choose to prevent `CachedImages` from expiring by manually setting the `spec.retain` flag to `true` like shown below: + +```yaml +apiVersion: kuik.enix.io/v1alpha1 +kind: CachedImage +metadata: + name: docker.io-library-nginx-1.25 +spec: + retain: true # here + sourceImage: nginx:1.25 +``` + +### Multi-arch cluster / Non-amd64 architectures + +By default, kuik only caches the `amd64` variant of an image. To cache more/other architectures, you need to set the `architectures` field in your helm values. 
+ +Example: + +```yaml +architectures: [amd64, arm] +``` + +Kuik will only cache available architectures for an image, but will not crash if the architecture doesn't exist. + +No manual action is required when migrating an amd64-only cluster from v1.3.0 to v1.4.0. + +### Corporate proxy + +To configure kuik to work behind a corporate proxy, you can set the well-known `http_proxy` and `https_proxy` environment variables (upper and lowercase variants both work) through helm values `proxy.env` and `controllers.env` as shown below: + +```yaml +controllers: + env: + - name: http_proxy + value: https://proxy.mycompany.org:3128 + - name: https_proxy + value: https://proxy.mycompany.org:3128 +proxy: + env: + - name: http_proxy + value: https://proxy.mycompany.org:3128 + - name: https_proxy + value: https://proxy.mycompany.org:3128 +``` + +Be careful that both the proxy and the controllers need to access the kubernetes API, so you might need to define the `no_proxy` variable as well to ignore the kubernetes API in case it is not reachable from your proxy (which is true most of the time). + +### Insecure registries & self-signed certificates + +In some cases, you may want to use images from self-hosted registries that are insecure (without TLS or with an invalid certificate for instance) or using a self-signed certificate. By default, kuik will not allow caching images from those registries for security reasons, even though you configured your container runtime (e.g. Docker, containerd) to do so. However you can choose to trust a list of insecure registries to pull from using the helm value `insecureRegistries`. If you use a self-signed certificate you can store the root certificate authority in a secret and reference it with the helm value `rootCertificateAuthorities`.
Here is an example of the use of those two values: + +```yaml +insecureRegistries: + - http://some-registry.com + - https://some-other-registry.com + +rootCertificateAuthorities: + secretName: some-secret + keys: + - root.pem +``` + +You can of course use as many insecure registries or root certificate authorities as you want. In the case of a self-signed certificate, you can either use the `insecureRegistries` or the `rootCertificateAuthorities` value, but trusting the root certificate will always be more secure than allowing insecure registries. + +### Registry UI + +For debugging reasons, it may be useful to be able to access the registry through an UI. This can be achieved by enabling the registry UI with the value `registryUI.enabled=true`. The UI will not be publicly available through an ingress, you will need to open a port-forward from port `80`. You can set a custom username and password with values `registryUI.auth.username` (default is `admin`) and `registryUI.auth.password` (empty by default). + +## Garbage collection and limitations + +When a CachedImage expires because it is not used anymore by the cluster, the image is deleted from the registry. However, since kuik uses [Docker's registry](https://docs.docker.com/registry/), this only deletes **reference files** like tags. It doesn't delete blobs, which account for most of the used disk space. [Garbage collection](https://docs.docker.com/registry/garbage-collection/) allows removing those blobs and free up space. The garbage collecting job can be configured to run thanks to the `registry.garbageCollectionSchedule` configuration in a cron-like format. It is disabled by default, because running garbage collection without persistence would just wipe out the cache registry. + +Garbage collection can only run when the registry is read-only (or stopped), otherwise image corruption may happen. (This is described in the [registry documentation](https://docs.docker.com/registry/garbage-collection/).) 
Before running garbage collection, kuik stops the registry. During that time, all image pulls are automatically proxied to the source registry so that garbage collection is mostly transparent for cluster nodes. + +Reminder: since garbage collection recreates the cache registry pod, if you run garbage collection without persistence, this will wipe out the cache registry. It is not recommended for production setups! + +Currently, if the cache gets deleted, the `status.isCached` field of `CachedImages` isn't updated automatically, which means that `kubectl get cachedimages` will incorrectly report that images are cached. However, you can trigger a controller reconciliation with the following command, which will pull all images again: + +```bash +kubectl annotate cachedimages --all --overwrite "timestamp=$(date +%s)" +``` + +## Known issues + +### Conflicts with other mutating webhooks + +Kuik's core functionality intercepts pod creation events to modify the definition of container images, facilitating image caching. However, some Kubernetes operators create pods autonomously and don't expect modifications to the image definitions (for example cloudnative-pg); the unexpected rewriting of the `pod.specs.containers.image` field can lead to an infinite reconciliation loop because the operator's expected target container image will be endlessly rewritten by the kuik `MutatingWebhookConfiguration`.
In that case, you may want to disable kuik for specific pods using the following Helm values: + +```yaml +controllers: + webhook: + objectSelector: + matchExpressions: + - key: cnpg.io/podRole + operator: NotIn + values: + - instance +``` + +### Private images are a bit less private + +Imagine the following scenario: + +- pods A and B use a private image, `example.com/myimage:latest` +- pod A correctly references `imagePullSecrets`, but pod B does not + +On a normal Kubernetes cluster (without kuik), if pods A and B are on the same node, then pod B will run correctly, even though it doesn't reference `imagePullSecrets`, because the image gets pulled when starting pod A, and once it's available on the node, any other pod can use it. However, if pods A and B are on different nodes, pod B won't start, because it won't be able to pull the private image. Some folks may use that to segregate sensitive images to specific nodes using a combination of taints, tolerations, or node selectors. + +However, when using kuik, once an image has been pulled and stored in kuik's registry, it becomes available for any node on the cluster. This means that using taints, tolerations, etc. to limit sensitive images to specific nodes won't work anymore. + +### Cluster autoscaling delays + +With kuik, all image pulls (except in the namespaces excluded from kuik) go through kuik's registry proxy, which runs on each node thanks to a DaemonSet. When a node gets added to a Kubernetes cluster (for instance, by the cluster autoscaler), a kuik registry proxy Pod gets scheduled on that node, but it will take a brief moment to start. During that time, all other image pulls will fail. Thanks to Kubernetes automatic retry mechanisms, they will eventually succeed, but on new nodes, you may see Pods in `ErrImagePull` or `ImagePullBackOff` status for a minute before everything works correctly.
If you are using cluster autoscaling and try to achieve very fast scale-up times, this is something that you might want to keep in mind. + +### Garbage collection issue + +We use Docker Distribution in Kuik, along with the integrated garbage collection tool. There is a bug that occurs when untagged images are pushed into the registry, causing it to crash. It's possible to end up in a situation where the registry is in read-only mode and becomes unusable. Until a permanent solution is found, we advise keeping the value `registry.garbageCollection.deleteUntagged` set to false. + +### Images with digest + +As of today, there is no way to manage container images based on a digest. The rationale behind this limitation is that a digest is an image manifest hash, and the manifest contains the registry URL associated with the image. Thus, pushing the image to another registry (our cache registry) changes its digest and as a consequence, it is no longer referenced by its original digest. Digest validation prevents pushing a manifest with an invalid digest. Therefore, we currently ignore all images based on a digest; those images will not be rewritten nor put in cache to prevent malfunctioning of kuik. + + +## License + +MIT License + +Copyright (c) 2020-2023 Enix SAS + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/addons/kube-image-keeper/crds/cachedimage-crd.yaml b/addons/kube-image-keeper/crds/cachedimage-crd.yaml new file mode 100644 index 000000000..cb9a882e8 --- /dev/null +++ b/addons/kube-image-keeper/crds/cachedimage-crd.yaml @@ -0,0 +1,116 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: cachedimages.kuik.enix.io +spec: + group: kuik.enix.io + names: + kind: CachedImage + listKind: CachedImageList + plural: cachedimages + shortNames: + - ci + singular: cachedimage + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .status.isCached + name: Cached + type: boolean + - jsonPath: .spec.retain + name: Retain + type: boolean + - jsonPath: .spec.expiresAt + name: Expires at + type: string + - jsonPath: .status.usedBy.count + name: Pods count + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: CachedImage is the Schema for the cachedimages API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CachedImageSpec defines the desired state of CachedImage + properties: + expiresAt: + format: date-time + type: string + retain: + type: boolean + sourceImage: + type: string + required: + - sourceImage + type: object + status: + description: CachedImageStatus defines the observed state of CachedImage + properties: + availableUpstream: + type: boolean + digest: + type: string + isCached: + type: boolean + lastSeenUpstream: + format: date-time + type: string + lastSuccessfulPull: + format: date-time + type: string + lastSync: + format: date-time + type: string + phase: + type: string + upToDate: + type: boolean + upstreamDigest: + type: string + usedBy: + properties: + count: + description: |- + jsonpath function .length() is not implemented, so the count field is required to display pods count in additionalPrinterColumns + see https://github.com/kubernetes-sigs/controller-tools/issues/447 + type: integer + pods: + items: + properties: + namespacedName: + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/addons/kube-image-keeper/crds/registry-crd.yaml b/addons/kube-image-keeper/crds/registry-crd.yaml new file mode 100644 index 000000000..5bf510815 --- /dev/null +++ b/addons/kube-image-keeper/crds/registry-crd.yaml @@ -0,0 +1,157 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + 
controller-gen.kubebuilder.io/version: v0.15.0 + name: repositories.kuik.enix.io +spec: + group: kuik.enix.io + names: + kind: Repository + listKind: RepositoryList + plural: repositories + shortNames: + - repo + singular: repository + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .status.images + name: Images + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Repository is the Schema for the repositories API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RepositorySpec defines the desired state of Repository + properties: + name: + type: string + pullSecretNames: + items: + type: string + type: array + pullSecretsNamespace: + type: string + updateFilters: + items: + type: string + type: array + updateInterval: + type: string + required: + - name + type: object + status: + description: RepositoryStatus defines the observed state of Repository + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. 
For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + images: + type: integer + lastUpdate: + format: date-time + type: string + phase: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/addons/kube-image-keeper/templates/NOTES.txt b/addons/kube-image-keeper/templates/NOTES.txt new file mode 100644 index 000000000..8f5f6b08b --- /dev/null +++ b/addons/kube-image-keeper/templates/NOTES.txt @@ -0,0 +1 @@ +CAUTION: If you use a storage backend that runs in the same cluster as kuik but in a different namespace, be sure to filter the storage backend's pods. Failure to do so may lead to interdependency issues, making it impossible to start both kuik and its storage backend if either encounters an issue. diff --git a/addons/kube-image-keeper/templates/_helpers.tpl b/addons/kube-image-keeper/templates/_helpers.tpl new file mode 100644 index 000000000..280fcc9e2 --- /dev/null +++ b/addons/kube-image-keeper/templates/_helpers.tpl @@ -0,0 +1,114 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "kube-image-keeper.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kube-image-keeper.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kube-image-keeper.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kube-image-keeper.labels" -}} +helm.sh/chart: {{ include "kube-image-keeper.chart" . }} +{{ include "kube-image-keeper.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{- define "kube-image-keeper.controllers-labels" -}} +{{ include "kube-image-keeper.labels" . }} +app.kubernetes.io/component: controllers +{{- end }} + +{{- define "kube-image-keeper.proxy-labels" -}} +{{ include "kube-image-keeper.labels" . }} +app.kubernetes.io/component: proxy +{{- end }} + +{{- define "kube-image-keeper.registry-labels" -}} +{{ include "kube-image-keeper.labels" . }} +app.kubernetes.io/component: registry +{{- end }} + +{{- define "kube-image-keeper.registry-ui-labels" -}} +{{ include "kube-image-keeper.labels" . }} +app.kubernetes.io/component: registry-ui +{{- end }} + +{{- define "kube-image-keeper.garbage-collection-labels" -}} +{{ include "kube-image-keeper.labels" . 
}} +app.kubernetes.io/component: garbage-collection +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kube-image-keeper.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kube-image-keeper.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{- define "kube-image-keeper.controllers-selectorLabels" -}} +{{ include "kube-image-keeper.selectorLabels" . }} +app.kubernetes.io/component: controllers +control-plane: controller-manager +{{- end }} + +{{- define "kube-image-keeper.proxy-selectorLabels" -}} +{{ include "kube-image-keeper.selectorLabels" . }} +app.kubernetes.io/component: proxy +control-plane: controller-manager +{{- end }} + +{{- define "kube-image-keeper.registry-selectorLabels" -}} +{{ include "kube-image-keeper.selectorLabels" . }} +app.kubernetes.io/component: registry +{{- end }} + +{{- define "kube-image-keeper.registry-ui-selectorLabels" -}} +{{ include "kube-image-keeper.selectorLabels" . }} +app.kubernetes.io/component: registry-ui +{{- end }} + +{{- define "kube-image-keeper.garbage-collection-selectorLabels" -}} +{{ include "kube-image-keeper.selectorLabels" . }} +app.kubernetes.io/component: garbage-collection +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kube-image-keeper.serviceAccountName" -}} +{{- default (printf "%s-%s" (include "kube-image-keeper.fullname" .) 
"controllers") .Values.serviceAccount.name }} +{{- end }} + +{{- define "kube-image-keeper.registry-stateless-mode" -}} +{{- ternary "true" "false" (or .Values.minio.enabled (not (empty .Values.registry.persistence.s3))) }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/clusterrole.yaml b/addons/kube-image-keeper/templates/clusterrole.yaml new file mode 100644 index 000000000..e7626ddcc --- /dev/null +++ b/addons/kube-image-keeper/templates/clusterrole.yaml @@ -0,0 +1,155 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "kube-image-keeper.serviceAccountName" . }} +rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - pods/finalizers + verbs: + - update + - apiGroups: + - "" + resources: + - pods/status + verbs: + - get + - patch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - kuik.enix.io + resources: + - cachedimages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - kuik.enix.io + resources: + - cachedimages/finalizers + verbs: + - update + - apiGroups: + - kuik.enix.io + resources: + - cachedimages/status + verbs: + - get + - patch + - update + - apiGroups: + - kuik.enix.io + resources: + - repositories + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - kuik.enix.io + resources: + - repositories/finalizers + verbs: + - update + - apiGroups: + - kuik.enix.io + resources: + - repositories/status + verbs: + - get + - patch + - update + {{- if .Values.psp.create }} + - apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ include 
"kube-image-keeper.fullname" . }} + {{- end }} + +--- +# permissions to do leader election. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "kube-image-keeper.serviceAccountName" . }}-leader-election +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/addons/kube-image-keeper/templates/clusterrolebinding.yaml b/addons/kube-image-keeper/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..34d4ad508 --- /dev/null +++ b/addons/kube-image-keeper/templates/clusterrolebinding.yaml @@ -0,0 +1,26 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "kube-image-keeper.serviceAccountName" . }} +roleRef: + kind: ClusterRole + name: {{ include "kube-image-keeper.serviceAccountName" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "kube-image-keeper.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "kube-image-keeper.serviceAccountName" . }}-leader-election +roleRef: + kind: ClusterRole + name: {{ include "kube-image-keeper.serviceAccountName" . }}-leader-election + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "kube-image-keeper.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} diff --git a/addons/kube-image-keeper/templates/controller-deployment.yaml b/addons/kube-image-keeper/templates/controller-deployment.yaml new file mode 100644 index 000000000..7df329e20 --- /dev/null +++ b/addons/kube-image-keeper/templates/controller-deployment.yaml @@ -0,0 +1,124 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-controllers + labels: + {{- include "kube-image-keeper.controllers-labels" . | nindent 4 }} +spec: + {{- with .Values.controllers.replicas }} + replicas: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "kube-image-keeper.controllers-selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.controllers.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "kube-image-keeper.controllers-selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.controllers.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kube-image-keeper.serviceAccountName" . }} + {{- if .Values.controllers.priorityClassName }} + priorityClassName: {{ .Values.controllers.priorityClassName | quote }} + {{- end }} + securityContext: + {{- toYaml .Values.controllers.podSecurityContext | nindent 8 }} + containers: + - name: cache-manager + securityContext: + {{- toYaml .Values.controllers.securityContext | nindent 12 }} + image: "{{ .Values.controllers.image.repository }}:{{ .Values.controllers.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.controllers.image.pullPolicy }} + command: + - manager + - -leader-elect + - -expiry-delay={{ .Values.cachedImagesExpiryDelay }} + - -proxy-port={{ .Values.proxy.hostPort }} + - -registry-endpoint={{ include "kube-image-keeper.fullname" . 
}}-registry:5000 + - -max-concurrent-cached-image-reconciles={{ .Values.controllers.maxConcurrentCachedImageReconciles }} + - -zap-log-level={{ .Values.controllers.verbosity }} + - -ignore-pull-policy-always={{- .Values.controllers.webhook.ignorePullPolicyAlways }} + {{- range .Values.controllers.webhook.ignoredImages }} + - -ignore-images={{- . }} + {{- end }} + {{- range .Values.architectures }} + - -arch={{- . }} + {{- end }} + {{- range .Values.insecureRegistries }} + - -insecure-registries={{- . }} + {{- end }} + {{- with .Values.rootCertificateAuthorities }} + {{- range .keys }} + - -root-certificate-authorities=/etc/ssl/certs/registry-certificate-authorities/{{- . }} + {{- end }} + {{- end }} + env: + {{- $noProxy := list -}} + {{- range .Values.controllers.env }} + {{- if eq (lower .name) "no_proxy" }} + {{- $noProxy = (.value | replace " " "," | splitList ",") -}} + {{- else }} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + - name: no_proxy + value: {{ join "," (prepend $noProxy (printf "%s-registry" (include "kube-image-keeper.fullname" .))) }} + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + - containerPort: 8080 + name: metrics + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: webhook-cert + readOnly: true + {{- if .Values.rootCertificateAuthorities }} + - mountPath: /etc/ssl/certs/registry-certificate-authorities + name: registry-certificate-authorities + readOnly: true + {{- end }} + {{- with .Values.controllers.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.controllers.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.controllers.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.controllers.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.controllers.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controllers.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: webhook-cert + secret: + defaultMode: 420 + secretName: {{ include "kube-image-keeper.fullname" . }}-webhook-server-cert + {{- with .Values.rootCertificateAuthorities }} + - name: registry-certificate-authorities + secret: + defaultMode: 420 + secretName: {{ .secretName }} + {{- end }} diff --git a/addons/kube-image-keeper/templates/controller-poddisruptionbudget.yaml b/addons/kube-image-keeper/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 000000000..fd305033f --- /dev/null +++ b/addons/kube-image-keeper/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if .Values.controllers.pdb.create }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-controllers + labels: + {{- include "kube-image-keeper.controllers-labels" . | nindent 4 }} +spec: + {{- if .Values.controllers.pdb.minAvailable }} + minAvailable: {{ .Values.controllers.pdb.minAvailable }} + {{- end }} + {{- if .Values.controllers.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.controllers.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- include "kube-image-keeper.controllers-selectorLabels" . | nindent 6 }} +{{- end }} \ No newline at end of file diff --git a/addons/kube-image-keeper/templates/controller-podmonitor.yaml b/addons/kube-image-keeper/templates/controller-podmonitor.yaml new file mode 100644 index 000000000..eca5be732 --- /dev/null +++ b/addons/kube-image-keeper/templates/controller-podmonitor.yaml @@ -0,0 +1,23 @@ +{{- if .Values.controllers.podMonitor.create }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "kube-image-keeper.fullname" . 
}}-controllers + labels: + {{- include "kube-image-keeper.controllers-labels" . | nindent 4 }} + {{- with .Values.controllers.podMonitor.extraLabels }} + {{- . | toYaml | trim | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "kube-image-keeper.controllers-selectorLabels" . | nindent 6 }} + podMetricsEndpoints: + - port: metrics + interval: {{ .Values.controllers.podMonitor.scrapeInterval }} + scrapeTimeout: {{ .Values.controllers.podMonitor.scrapeTimeout }} + {{- with .Values.controllers.podMonitor.relabelings }} + relabelings: + {{- . | toYaml | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/garbage-collection-cron-job.yaml b/addons/kube-image-keeper/templates/garbage-collection-cron-job.yaml new file mode 100644 index 000000000..10f8ba06f --- /dev/null +++ b/addons/kube-image-keeper/templates/garbage-collection-cron-job.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.registry.garbageCollection.schedule (or .Values.registry.persistence.enabled (eq (include "kube-image-keeper.registry-stateless-mode" .) "true")) }} +{{- if semverCompare ">=1.21-0" (default .Capabilities.KubeVersion.Version .Values.kubeVersion) -}} +apiVersion: batch/v1 +{{- else -}} +apiVersion: batch/v1beta1 +{{- end }} +kind: CronJob +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry-garbage-collection + labels: + {{- include "kube-image-keeper.garbage-collection-labels" . | nindent 4 }} +spec: + concurrencyPolicy: Forbid + schedule: "{{ .Values.registry.garbageCollection.schedule }}" + jobTemplate: + spec: + backoffLimit: 3 + activeDeadlineSeconds: 600 + template: + spec: + serviceAccountName: {{ include "kube-image-keeper.fullname" . 
}}-registry-restart + restartPolicy: Never + containers: + - name: kubectl + image: "{{ .Values.registry.garbageCollection.image.repository }}:{{ .Values.registry.garbageCollection.image.tag }}" + imagePullPolicy: {{ .Values.registry.garbageCollection.image.pullPolicy }} + command: + - bash + - -c + - | + set -e + {{- if eq (include "kube-image-keeper.registry-stateless-mode" .) "true" }} + kubectl set env deploy {{ include "kube-image-keeper.fullname" . }}-registry REGISTRY_STORAGE_MAINTENANCE_READONLY="{\"enabled\":true}" + + # wait for deployment to be rolled out and terminated pods to be deleted to prevent exec’ing into a terminating pod (see https://binx.io/2022/01/18/how-to-run-a-post-deployment-script-on-kubernetes/) + kubectl rollout status deploy {{ include "kube-image-keeper.fullname" . }}-registry + SELECTOR=$(kubectl get deploy {{ include "kube-image-keeper.fullname" . }}-registry -o wide --no-headers | awk '{print $NF}') + while [[ $(kubectl get pods --selector ${SELECTOR} --no-headers | awk '{print $3}' | uniq) != "Running" ]]; do + echo "waiting for terminating pods to be deleted" + sleep 1 + done + + kubectl exec deploy/{{ include "kube-image-keeper.fullname" . }}-registry -- bin/registry garbage-collect /etc/docker/registry/config.yml --delete-untagged={{ .Values.registry.garbageCollection.deleteUntagged }} + kubectl set env deploy {{ include "kube-image-keeper.fullname" . }}-registry REGISTRY_STORAGE_MAINTENANCE_READONLY- + {{- else }} + kubectl rollout restart sts {{ include "kube-image-keeper.fullname" . }}-registry + kubectl rollout status sts {{ include "kube-image-keeper.fullname" . 
}}-registry + {{- end }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/garbage-collection-role-binding.yaml b/addons/kube-image-keeper/templates/garbage-collection-role-binding.yaml new file mode 100644 index 000000000..ccad6ec10 --- /dev/null +++ b/addons/kube-image-keeper/templates/garbage-collection-role-binding.yaml @@ -0,0 +1,15 @@ +{{- if .Values.registry.garbageCollection.schedule }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry-restart + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kube-image-keeper.fullname" . }}-registry-restart +subjects: + - kind: ServiceAccount + name: {{ include "kube-image-keeper.fullname" . }}-registry-restart + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/garbage-collection-role.yaml b/addons/kube-image-keeper/templates/garbage-collection-role.yaml new file mode 100644 index 000000000..82fa720c4 --- /dev/null +++ b/addons/kube-image-keeper/templates/garbage-collection-role.yaml @@ -0,0 +1,17 @@ +{{- if .Values.registry.garbageCollection.schedule }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry-restart +rules: + - apiGroups: ["apps", "extensions"] + resources: ["statefulsets", "deployments"] + resourceNames: ["{{ include "kube-image-keeper.fullname" . 
}}-registry"] + verbs: ["get", "patch", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["list"] + - apiGroups: [""] + resources: ["pods/exec"] + verbs: ["create"] +{{- end }} diff --git a/addons/kube-image-keeper/templates/garbage-collection-service-account.yaml b/addons/kube-image-keeper/templates/garbage-collection-service-account.yaml new file mode 100644 index 000000000..e2df42e94 --- /dev/null +++ b/addons/kube-image-keeper/templates/garbage-collection-service-account.yaml @@ -0,0 +1,6 @@ +{{- if .Values.registry.garbageCollection.schedule }} +kind: ServiceAccount +apiVersion: v1 +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry-restart +{{- end }} diff --git a/addons/kube-image-keeper/templates/minio-registry-users.yaml b/addons/kube-image-keeper/templates/minio-registry-users.yaml new file mode 100644 index 000000000..3df6b1f65 --- /dev/null +++ b/addons/kube-image-keeper/templates/minio-registry-users.yaml @@ -0,0 +1,31 @@ +{{- if .Values.minio.enabled }} + +{{- $secretName := "kube-image-keeper-minio-registry-passwords" }} +{{- $secretData := (get (lookup "v1" "Secret" .Release.Namespace $secretName) "data") | default dict }} +# set passwords to existing secret data or generate random ones when they do not exist +{{- $password := (get $secretData "registry" | b64dec) | default (randAlphaNum 32) }} + +apiVersion: v1 +kind: Secret +metadata: + name: kube-image-keeper-minio-registry-users + annotations: + "helm.sh/resource-policy": "keep" +type: Opaque +stringData: + registry: | + username=registry + password={{ $password }} + policies=readwrite + setPolicies=true +--- +apiVersion: v1 +kind: Secret +metadata: + name: kube-image-keeper-minio-registry-passwords + annotations: + "helm.sh/resource-policy": "keep" +type: Opaque +stringData: + registry: {{ $password }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/mutatingwebhookconfiguration.yaml 
b/addons/kube-image-keeper/templates/mutatingwebhookconfiguration.yaml new file mode 100644 index 000000000..dc5f2ba43 --- /dev/null +++ b/addons/kube-image-keeper/templates/mutatingwebhookconfiguration.yaml @@ -0,0 +1,70 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + annotations: + cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "kube-image-keeper.fullname" . }}-serving-cert + name: {{ include "kube-image-keeper.fullname" . }}-mutating-webhook +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: {{ include "kube-image-keeper.fullname" . }}-webhook + namespace: {{ .Release.Namespace }} + path: /mutate-core-v1-pod + failurePolicy: Ignore + reinvocationPolicy: IfNeeded + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: NotIn + values: + - kube-system + - {{ .Release.Namespace }} + {{- if .Values.controllers.webhook.ignoredNamespaces }} + {{- range .Values.controllers.webhook.ignoredNamespaces }} + - {{ . | toYaml | indent 8 | trim }} + {{- end }} + {{- end }} + objectSelector: + matchExpressions: + - key: kube-image-keeper.enix.io/image-caching-policy + operator: NotIn + values: + - ignore + {{- range .Values.controllers.webhook.objectSelector.matchExpressions }} + - {{ . | toYaml | indent 6 | trim }} + {{- end }} + name: mpod.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: {{ include "kube-image-keeper.fullname" . 
}}-webhook + namespace: {{ .Release.Namespace }} + path: /mutate-kuik-enix-io-v1alpha1-cachedimage + failurePolicy: Fail + name: mcachedimage.kb.io + rules: + - apiGroups: + - kuik.enix.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - cachedimages + sideEffects: None diff --git a/addons/kube-image-keeper/templates/proxy-daemonset.yaml b/addons/kube-image-keeper/templates/proxy-daemonset.yaml new file mode 100644 index 000000000..427b89659 --- /dev/null +++ b/addons/kube-image-keeper/templates/proxy-daemonset.yaml @@ -0,0 +1,130 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-proxy + labels: + {{- include "kube-image-keeper.proxy-labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "kube-image-keeper.proxy-selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.proxy.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "kube-image-keeper.proxy-selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.proxy.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kube-image-keeper.serviceAccountName" . 
}} + {{- if .Values.proxy.priorityClassName }} + priorityClassName: {{ .Values.proxy.priorityClassName | quote }} + {{- end }} + {{- if .Values.proxy.hostNetwork }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + {{- end }} + securityContext: + {{- toYaml .Values.proxy.podSecurityContext | nindent 8 }} + containers: + - name: cache-proxy + securityContext: + {{- toYaml .Values.proxy.securityContext | nindent 12 }} + image: "{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.proxy.image.pullPolicy }} + ports: + {{- if .Values.proxy.hostNetwork }} + - containerPort: {{ .Values.proxy.hostPort }} + hostPort: {{ .Values.proxy.hostPort }} + protocol: TCP + - containerPort: {{ .Values.proxy.metricsPort }} + hostPort: {{ .Values.proxy.metricsPort }} + name: metrics + protocol: TCP + {{- else }} + - containerPort: {{ .Values.proxy.hostPort }} + hostIP: {{ .Values.proxy.hostIp }} + hostPort: {{ .Values.proxy.hostPort }} + protocol: TCP + - containerPort: 8080 + name: metrics + protocol: TCP + {{- end }} + command: + - registry-proxy + - -v={{ .Values.proxy.verbosity }} + - -registry-endpoint={{ include "kube-image-keeper.fullname" . }}-registry:5000 + {{- with .Values.proxy.kubeApiRateLimits }} + - -kube-api-rate-limit-qps={{ .qps }} + - -kube-api-rate-limit-burst={{ .burst }} + {{- end }} + {{- range .Values.insecureRegistries }} + - -insecure-registries={{- . }} + {{- end }} + {{- with .Values.rootCertificateAuthorities }} + {{- range .keys }} + - -root-certificate-authorities=/etc/ssl/certs/registry-certificate-authorities/{{- . }} + {{- end }} + {{- end }} + {{- if .Values.proxy.hostNetwork }} + - -bind-address={{ .Values.proxy.hostIp }}:{{ .Values.proxy.hostPort }} + - -metrics-bind-address={{ .Values.proxy.hostIp }}:{{ .Values.proxy.metricsPort }} + {{- else }} + - -bind-address=:{{ .Values.proxy.hostPort }} + {{- end }} + env: + {{- with .Values.proxy.env }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + - name: GIN_MODE + value: release + {{- if .Values.rootCertificateAuthorities }} + volumeMounts: + - mountPath: /etc/ssl/certs/registry-certificate-authorities + name: registry-certificate-authorities + readOnly: true + {{- end }} + {{- $readinessProbe := deepCopy .Values.proxy.readinessProbe }} + {{- if .Values.proxy.hostNetwork }} + {{- $readinessProbe := merge $readinessProbe.httpGet (dict "host" "localhost") }} + {{- end }} + {{- with .Values.proxy.readinessProbe }} + readinessProbe: + {{- $readinessProbe | toYaml | nindent 12 }} + {{- end }} + {{- $livenessProbe := deepCopy .Values.proxy.livenessProbe }} + {{- if .Values.proxy.hostNetwork }} + {{- $livenessProbe := merge $livenessProbe.httpGet (dict "host" "localhost") }} + {{- end }} + {{- with .Values.proxy.livenessProbe }} + livenessProbe: + {{- $livenessProbe | toYaml | nindent 12 }} + {{- end }} + {{- with .Values.proxy.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.proxy.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.proxy.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.proxy.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.rootCertificateAuthorities }} + volumes: + - name: registry-certificate-authorities + secret: + defaultMode: 420 + secretName: {{ .secretName }} + {{- end }} diff --git a/addons/kube-image-keeper/templates/proxy-podmonitor.yaml b/addons/kube-image-keeper/templates/proxy-podmonitor.yaml new file mode 100644 index 000000000..f168e9d1b --- /dev/null +++ b/addons/kube-image-keeper/templates/proxy-podmonitor.yaml @@ -0,0 +1,23 @@ +{{- if .Values.proxy.podMonitor.create }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-proxy + labels: + {{- include "kube-image-keeper.proxy-labels" . 
| nindent 4 }} + {{- with .Values.proxy.podMonitor.extraLabels }} + {{- . | toYaml | trim | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "kube-image-keeper.proxy-selectorLabels" . | nindent 6 }} + podMetricsEndpoints: + - port: metrics + interval: {{ .Values.proxy.podMonitor.scrapeInterval }} + scrapeTimeout: {{ .Values.proxy.podMonitor.scrapeTimeout }} + {{- with .Values.proxy.podMonitor.relabelings }} + relabelings: + {{- . | toYaml | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/psp.yaml b/addons/kube-image-keeper/templates/psp.yaml new file mode 100644 index 000000000..e98936da5 --- /dev/null +++ b/addons/kube-image-keeper/templates/psp.yaml @@ -0,0 +1,36 @@ +{{- if .Values.psp.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "kube-image-keeper.fullname" . }} + labels: + {{- include "kube-image-keeper.proxy-labels" . | nindent 4 }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'secret' + hostNetwork: true + hostPorts: + - min: {{ .Values.proxy.hostPort | int }} + max: {{ .Values.proxy.hostPort | int }} + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} diff --git a/addons/kube-image-keeper/templates/registry-deployment.yaml b/addons/kube-image-keeper/templates/registry-deployment.yaml new file mode 100644 index 000000000..6165e08d8 --- /dev/null +++ b/addons/kube-image-keeper/templates/registry-deployment.yaml @@ -0,0 +1,113 @@ +{{- if eq (include "kube-image-keeper.registry-stateless-mode" .) "true" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kube-image-keeper.fullname" . 
}}-registry + labels: + {{- include "kube-image-keeper.registry-labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "kube-image-keeper.registry-selectorLabels" . | nindent 6 }} + replicas: {{ .Values.registry.replicas }} + template: + metadata: + {{- with .Values.registry.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "kube-image-keeper.registry-selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.registry.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.registry.priorityClassName }} + priorityClassName: {{ .Values.registry.priorityClassName | quote }} + {{- end }} + serviceAccountName: {{ include "kube-image-keeper.fullname" . }}-registry + securityContext: + {{- toYaml .Values.registry.podSecurityContext | nindent 8 }} + containers: + - name: registry + securityContext: + {{- toYaml .Values.registry.securityContext | nindent 12 }} + image: "{{ .Values.registry.image.repository }}:{{ .Values.registry.image.tag }}" + imagePullPolicy: {{ .Values.registry.image.pullPolicy }} + ports: + - containerPort: 5000 + protocol: TCP + resources: + {{- toYaml .Values.registry.resources | nindent 12 }} + env: + - name: REGISTRY_HTTP_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kube-image-keeper.fullname" . 
}}-registry-http-secret + key: secret + - name: REGISTRY_STORAGE_DELETE_ENABLED + value: "true" + - name: REGISTRY_STORAGE + value: s3 + {{- if .Values.registry.serviceMonitor.create }} + - name: REGISTRY_HTTP_DEBUG_ADDR + value: 0.0.0.0:5001 + - name: REGISTRY_HTTP_DEBUG_PROMETHEUS_ENABLED + value: "true" + {{- end }} + {{- if .Values.minio.enabled }} + - name: REGISTRY_STORAGE_S3_REGION + value: us-east-1 + - name: REGISTRY_STORAGE_S3_BUCKET + value: registry + - name: REGISTRY_STORAGE_S3_REGIONENDPOINT + value: http://{{ .Values.minio.fullnameOverride }}:9000 + - name: REGISTRY_STORAGE_REDIRECT_DISABLE + value: "true" + {{- else }} + {{- range $k, $v := omit .Values.registry.persistence.s3 "accesskey" "secretkey" }} + - name: {{ printf "%s_%s" "REGISTRY_STORAGE_S3" ($k | upper) }} + value: {{ $v | quote }} + {{- end }} + {{- if .Values.registry.persistence.disableS3Redirections }} + - name: REGISTRY_STORAGE_REDIRECT_DISABLE + value: "true" + {{- end }} + {{- end }} + {{ $s3KeysSecretName := .Values.registry.persistence.s3ExistingSecret | default "kube-image-keeper-s3-registry-keys" }} + - name: REGISTRY_STORAGE_S3_ACCESSKEY + valueFrom: + secretKeyRef: + name: {{ $s3KeysSecretName }} + key: accessKey + - name: REGISTRY_STORAGE_S3_SECRETKEY + valueFrom: + secretKeyRef: + name: {{ $s3KeysSecretName }} + key: secretKey + {{- range .Values.registry.env }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + {{- with .Values.registry.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.registry.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.registry.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.registry.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.registry.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/registry-http-secret.yaml b/addons/kube-image-keeper/templates/registry-http-secret.yaml new file mode 100644 index 000000000..cc156473c --- /dev/null +++ b/addons/kube-image-keeper/templates/registry-http-secret.yaml @@ -0,0 +1,15 @@ +{{- if eq (include "kube-image-keeper.registry-stateless-mode" .) "true" }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry-http-secret + annotations: + "helm.sh/resource-policy": "keep" +type: Opaque +stringData: + {{- $secretName := printf "%s-%s" (include "kube-image-keeper.fullname" .) "registry-http-secret" }} + {{- $secretData := (get (lookup "v1" "Secret" .Release.Namespace $secretName) "data") | default dict }} + # set $secret to existing secret data or generate a random one when it does not exist + {{- $secret := .Values.registry.httpSecret | default (get $secretData "secret" | b64dec) | default (randAlphaNum 32) }} + secret: {{ $secret }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/registry-poddisruptionbudget.yaml b/addons/kube-image-keeper/templates/registry-poddisruptionbudget.yaml new file mode 100644 index 000000000..14f26c5d8 --- /dev/null +++ b/addons/kube-image-keeper/templates/registry-poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.registry.pdb.create (eq (include "kube-image-keeper.registry-stateless-mode" .) "true") }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry + labels: + {{- include "kube-image-keeper.registry-labels" . | nindent 4 }} +spec: + {{- if .Values.registry.pdb.minAvailable }} + minAvailable: {{ .Values.registry.pdb.minAvailable }} + {{- end }} + {{- if .Values.registry.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.registry.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- include "kube-image-keeper.registry-selectorLabels" . 
| nindent 6 }} +{{- end }} \ No newline at end of file diff --git a/addons/kube-image-keeper/templates/registry-pvc.yaml b/addons/kube-image-keeper/templates/registry-pvc.yaml new file mode 100644 index 000000000..9eb7a899b --- /dev/null +++ b/addons/kube-image-keeper/templates/registry-pvc.yaml @@ -0,0 +1,14 @@ +{{- if and (eq (include "kube-image-keeper.registry-stateless-mode" .) "false") (.Values.registry.persistence.enabled) (eq .Values.registry.persistence.accessModes "ReadWriteMany") }} + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry-pvc +spec: + accessModes: + - {{ .Values.registry.persistence.accessModes }} + storageClassName: {{ .Values.registry.persistence.storageClass }} + resources: + requests: + storage: {{ .Values.registry.persistence.size }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/registry-serviceaccount.yaml b/addons/kube-image-keeper/templates/registry-serviceaccount.yaml new file mode 100644 index 000000000..0b65ba3dc --- /dev/null +++ b/addons/kube-image-keeper/templates/registry-serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry + labels: + {{- include "kube-image-keeper.labels" . | nindent 4 }} + {{- with .Values.registry.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} diff --git a/addons/kube-image-keeper/templates/registry-servicemonitor.yaml b/addons/kube-image-keeper/templates/registry-servicemonitor.yaml new file mode 100644 index 000000000..7f1c022a3 --- /dev/null +++ b/addons/kube-image-keeper/templates/registry-servicemonitor.yaml @@ -0,0 +1,23 @@ +{{- if .Values.registry.serviceMonitor.create }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry + labels: + {{- include "kube-image-keeper.registry-labels" . 
| nindent 4 }} + {{- with .Values.registry.serviceMonitor.extraLabels }} + {{- . | toYaml | trim | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "kube-image-keeper.registry-selectorLabels" . | nindent 6 }} + endpoints: + - port: registry-metrics + interval: {{ .Values.registry.serviceMonitor.scrapeInterval }} + scrapeTimeout: {{ .Values.registry.serviceMonitor.scrapeTimeout }} + {{- with .Values.registry.serviceMonitor.relabelings }} + relabelings: + {{- . | toYaml | nindent 4 }} + {{- end }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/registry-statefulset.yaml b/addons/kube-image-keeper/templates/registry-statefulset.yaml new file mode 100644 index 000000000..6077b5f6c --- /dev/null +++ b/addons/kube-image-keeper/templates/registry-statefulset.yaml @@ -0,0 +1,134 @@ +{{- if eq (include "kube-image-keeper.registry-stateless-mode" .) "false" }} + +{{- if and (gt (int .Values.registry.replicas) 1) (ne .Values.registry.persistence.accessModes "ReadWriteMany") -}} +{{ fail "registry needs a configured S3 endpoint or a PVC which supports ReadWriteMany to enable HA mode (>1 replicas), please enable minio or configure an external S3 endpoint" }} +{{- end }} + +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry + labels: + {{- include "kube-image-keeper.registry-labels" . | nindent 4 }} +spec: + replicas: {{ .Values.registry.replicas }} + serviceName: {{ include "kube-image-keeper.fullname" . }}-registry + selector: + matchLabels: + {{- include "kube-image-keeper.registry-selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.registry.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "kube-image-keeper.registry-selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.registry.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.registry.priorityClassName }} + priorityClassName: {{ .Values.registry.priorityClassName | quote }} + {{- end }} + serviceAccountName: {{ include "kube-image-keeper.fullname" . }}-registry + securityContext: + {{- toYaml .Values.registry.podSecurityContext | nindent 8 }} + {{- if .Values.registry.persistence.enabled }} + initContainers: + - name: setup-dirs + image: "{{ .Values.registry.image.repository }}:{{ .Values.registry.image.tag }}" + imagePullPolicy: {{ .Values.registry.image.pullPolicy }} + command: + - mkdir + - -p + - /var/lib/registry/docker/registry/v2/repositories/ + - /var/lib/registry/docker/registry/v2/blobs/ + resources: + {{- toYaml .Values.registry.resources | nindent 12 }} + volumeMounts: + - mountPath: /var/lib/registry + name: data + - name: garbage-collector + image: "{{ .Values.registry.image.repository }}:{{ .Values.registry.image.tag }}" + imagePullPolicy: {{ .Values.registry.image.pullPolicy }} + command: + - bin/registry + - garbage-collect + - /etc/docker/registry/config.yml + - --delete-untagged={{ .Values.registry.garbageCollection.deleteUntagged }} + resources: + {{- toYaml .Values.registry.resources | nindent 12 }} + volumeMounts: + - mountPath: /var/lib/registry + name: data + {{- end }} + containers: + - name: registry + securityContext: + {{- toYaml .Values.registry.securityContext | nindent 12 }} + image: "{{ .Values.registry.image.repository }}:{{ .Values.registry.image.tag }}" + imagePullPolicy: {{ .Values.registry.image.pullPolicy }} + ports: + - containerPort: 5000 + protocol: TCP + resources: + {{- toYaml .Values.registry.resources | nindent 12 }} + env: + - name: REGISTRY_STORAGE_DELETE_ENABLED + value: "true" + {{- if .Values.registry.serviceMonitor.create }} + - name: REGISTRY_HTTP_DEBUG_ADDR + value: 0.0.0.0:5001 + - name: REGISTRY_HTTP_DEBUG_PROMETHEUS_ENABLED + value: "true" + {{- end }} + {{- range .Values.registry.env }} + - name: {{ .name }} + value: {{ .value | quote 
}} + {{- end }} + {{- if .Values.registry.persistence.enabled }} + volumeMounts: + - mountPath: /var/lib/registry + name: data + {{- end }} + {{- with .Values.registry.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.registry.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.registry.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.registry.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.registry.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if and (.Values.registry.persistence.enabled) (ne .Values.registry.persistence.accessModes "ReadWriteMany") }} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - {{ .Values.registry.persistence.accessModes }} + storageClassName: {{ .Values.registry.persistence.storageClass }} + resources: + requests: + storage: {{ .Values.registry.persistence.size }} + {{- end }} + {{- if and (.Values.registry.persistence.enabled) (eq .Values.registry.persistence.accessModes "ReadWriteMany") }} + volumes: + - name: data + persistentVolumeClaim: + claimName: {{ include "kube-image-keeper.fullname" . }}-registry-pvc + {{- end }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/registry-ui-deployment.yaml b/addons/kube-image-keeper/templates/registry-ui-deployment.yaml new file mode 100644 index 000000000..b425a2767 --- /dev/null +++ b/addons/kube-image-keeper/templates/registry-ui-deployment.yaml @@ -0,0 +1,69 @@ +{{- if .Values.registryUI.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry-ui + labels: + {{- include "kube-image-keeper.registry-ui-labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "kube-image-keeper.registry-ui-selectorLabels" . 
| nindent 6 }} + template: + metadata: + {{- with .Values.registryUI.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "kube-image-keeper.registry-ui-selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.registryUI.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.registryUI.podSecurityContext | nindent 8 }} + containers: + - name: registry-ui + securityContext: + {{- toYaml .Values.registryUI.securityContext | nindent 12 }} + image: {{ .Values.registryUI.image.repository }}:{{ .Values.registryUI.image.tag }} + imagePullPolicy: {{ .Values.registryUI.image.pullPolicy }} + ports: + - containerPort: 80 + resources: + {{- toYaml .Values.registryUI.resources | nindent 12 }} + env: + - name: REGISTRY_HOST + value: {{ include "kube-image-keeper.fullname" . }}-registry + - name: REGISTRY_PORT + value: "5000" + - name: REGISTRY_PROTOCOL + value: "http" + - name: SSL_VERIFY + value: "false" + - name: USERNAME + valueFrom: + secretKeyRef: + name: {{ include "kube-image-keeper.fullname" . }}-registry-ui-basic-auth + key: username + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kube-image-keeper.fullname" . }}-registry-ui-basic-auth + key: password + {{- with .Values.registryUI.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.registryUI.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.registryUI.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end -}} diff --git a/addons/kube-image-keeper/templates/registry-ui-secret.yaml b/addons/kube-image-keeper/templates/registry-ui-secret.yaml new file mode 100644 index 000000000..de4b3cd18 --- /dev/null +++ b/addons/kube-image-keeper/templates/registry-ui-secret.yaml @@ -0,0 +1,16 @@ +{{- if .Values.registryUI.enabled -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry-ui-basic-auth + annotations: + "helm.sh/resource-policy": "keep" +type: kubernetes.io/basic-auth +stringData: + {{- $secretName := printf "%s-%s" (include "kube-image-keeper.fullname" .) "registry-ui-basic-auth" }} + {{- $secretData := (get (lookup "v1" "Secret" .Release.Namespace $secretName) "data") | default dict }} + # set $password to existing secret data or generate a random one when it does not exist + {{- $password := (get $secretData "password" | b64dec) | default (randAlphaNum 32) }} + username: {{ .Values.registryUI.auth.username }} + password: {{ .Values.registryUI.auth.password | default $password }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/s3-registry-keys.yaml b/addons/kube-image-keeper/templates/s3-registry-keys.yaml new file mode 100644 index 000000000..8954dea19 --- /dev/null +++ b/addons/kube-image-keeper/templates/s3-registry-keys.yaml @@ -0,0 +1,22 @@ +{{- if or .Values.minio.enabled (and (not (empty .Values.registry.persistence.s3)) (empty .Values.registry.persistence.s3ExistingSecret)) }} +apiVersion: v1 +kind: Secret +metadata: + name: kube-image-keeper-s3-registry-keys + annotations: + "helm.sh/resource-policy": "keep" +type: Opaque +stringData: + {{- if .Values.minio.enabled }} + {{- $secretName := "kube-image-keeper-s3-registry-keys" }} + {{- $secretData := (get (lookup "v1" "Secret" .Release.Namespace $secretName) "data") | default dict }} + # set $accessKey and $secretKey to existing secret data or generate random ones when they do not exist + {{- $accessKey := (get
$secretData "accessKey" | b64dec) | default (randAlphaNum 16) }} + {{- $secretKey := (get $secretData "secretKey" | b64dec) | default (randAlphaNum 32) }} + accessKey: {{ $accessKey }} + secretKey: {{ $secretKey }} + {{- else }} + accessKey: {{ .Values.registry.persistence.s3.accesskey }} + secretKey: {{ .Values.registry.persistence.s3.secretkey }} + {{- end }} +{{- end }} diff --git a/addons/kube-image-keeper/templates/service.yaml b/addons/kube-image-keeper/templates/service.yaml new file mode 100644 index 000000000..a8a05472d --- /dev/null +++ b/addons/kube-image-keeper/templates/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-registry + labels: + {{- include "kube-image-keeper.registry-labels" . | nindent 4 }} +spec: + type: {{ .Values.registry.service.type }} + ports: + - name: docker-registry + port: 5000 + targetPort: 5000 + - name: registry-metrics + port: 5001 + targetPort: 5001 + selector: + {{- include "kube-image-keeper.registry-selectorLabels" . | nindent 4 }} diff --git a/addons/kube-image-keeper/templates/serviceaccount.yaml b/addons/kube-image-keeper/templates/serviceaccount.yaml new file mode 100644 index 000000000..d9a774683 --- /dev/null +++ b/addons/kube-image-keeper/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "kube-image-keeper.serviceAccountName" . }} + labels: + {{- include "kube-image-keeper.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} diff --git a/addons/kube-image-keeper/templates/webhook-certificate.yaml b/addons/kube-image-keeper/templates/webhook-certificate.yaml new file mode 100644 index 000000000..f047cc940 --- /dev/null +++ b/addons/kube-image-keeper/templates/webhook-certificate.yaml @@ -0,0 +1,25 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-serving-cert +spec: + dnsNames: + - {{ include "kube-image-keeper.fullname" . }}-webhook.{{ .Release.Namespace }}.svc + - {{ include "kube-image-keeper.fullname" . }}-webhook.{{ .Release.Namespace }}.svc.cluster.local + secretName: {{ include "kube-image-keeper.fullname" . }}-webhook-server-cert + issuerRef: + {{- if .Values.controllers.webhook.createCertificateIssuer }} + kind: Issuer + name: {{ include "kube-image-keeper.fullname" . }}-selfsigned-issuer + {{- else -}} + {{- toYaml .Values.controllers.webhook.certificateIssuerRef | nindent 4 }} + {{- end }} +--- +{{- if .Values.controllers.webhook.createCertificateIssuer }} +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-selfsigned-issuer +spec: + selfSigned: {} +{{- end -}} diff --git a/addons/kube-image-keeper/templates/webhook-service.yaml b/addons/kube-image-keeper/templates/webhook-service.yaml new file mode 100644 index 000000000..fa2f73757 --- /dev/null +++ b/addons/kube-image-keeper/templates/webhook-service.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kube-image-keeper.fullname" . }}-webhook +spec: + ports: + - port: 443 + targetPort: 9443 + selector: +{{- include "kube-image-keeper.controllers-selectorLabels" . | nindent 4 }} diff --git a/addons/kube-image-keeper/values.yaml b/addons/kube-image-keeper/values.yaml new file mode 100644 index 000000000..939cde739 --- /dev/null +++ b/addons/kube-image-keeper/values.yaml @@ -0,0 +1,373 @@ +# Default values for kube-image-keeper. 
+# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Delay in days before deleting an unused CachedImage +cachedImagesExpiryDelay: 30 +# -- List of architectures to put in cache +architectures: [amd64] +# -- Insecure registries to allow to cache and proxify images from +insecureRegistries: [] +# -- Root certificate authorities to trust +rootCertificateAuthorities: {} + # secretName: some-secret + # keys: [] + +controllers: + # Maximum number of CachedImages that can be handled and reconciled at the same time (put or remove from cache) + maxConcurrentCachedImageReconciles: 3 + # -- Number of controllers + replicas: 2 + image: + # -- Controller image repository. Also available: `quay.io/enix/kube-image-keeper` + repository: ghcr.io/enix/kube-image-keeper + # -- Controller image pull policy + pullPolicy: IfNotPresent + # -- Controller image tag. Default chart appVersion + tag: "" + # -- Controller logging verbosity + verbosity: INFO + # -- Specify secrets to be used when pulling controller image + imagePullSecrets: [] + # -- Annotations to add to the controller pod + podAnnotations: {} + # -- Security context for the controller pod + podSecurityContext: {} + # -- Security context for containers of the controller pod + securityContext: {} + # -- Node selector for the controller pod + nodeSelector: {} + # -- Toleration for the controller pod + tolerations: [] + # -- Set the PriorityClassName for the controller pod + priorityClassName: "" + pdb: + # -- Create a PodDisruptionBudget for the controllers + create: false + # -- Minimum available pods + minAvailable: 1 + # -- Maximum unavailable pods + maxUnavailable: "" + # -- Affinity for the controller pod + affinity: {} + # -- Extra env variables for the controllers pod + env: [] + # -- Readiness probe definition for the controllers pod + readinessProbe: + httpGet: + path: /readyz + port: 8081 + # -- Liveness probe definition for the controllers pod + livenessProbe: + httpGet: + 
path: /healthz + port: 8081 + resources: + requests: + # -- Cpu requests for the controller pod + cpu: "50m" + # -- Memory requests for the controller pod + memory: "50Mi" + limits: + # -- Cpu limits for the controller pod + cpu: "1" + # -- Memory limits for the controller pod + memory: "512Mi" + webhook: + # -- Don't enable image caching for pods scheduled into these namespaces + ignoredNamespaces: [] + # -- Don't enable image caching if the image match the following regexes + ignoredImages: [] + # -- Don't enable image caching if the image is configured with imagePullPolicy: Always + ignorePullPolicyAlways: true + # -- If true, create the issuer used to issue the webhook certificate + createCertificateIssuer: true + # -- Issuer reference to issue the webhook certificate, ignored if createCertificateIssuer is true + certificateIssuerRef: + kind: Issuer + name: kube-image-keeper-selfsigned-issuer + objectSelector: + # -- Run the webhook if the object has matching labels. (See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#labelselectorrequirement-v1-meta) + matchExpressions: [] + podMonitor: + # -- Should a PodMonitor object be installed to scrape kuik controller metrics. For prometheus-operator (kube-prometheus) users. + create: false + # -- Target scrape interval set in the PodMonitor + scrapeInterval: 60s + # -- Target scrape timeout set in the PodMonitor + scrapeTimeout: 30s + # -- Additional labels to add to PodMonitor objects + extraLabels: {} + # -- Relabel config for the PodMonitor, see: https://coreos.com/operators/prometheus/docs/latest/api.html#relabelconfig + relabelings: [] + +proxy: + image: + # -- Proxy image repository. Also available: `quay.io/enix/kube-image-keeper` + repository: ghcr.io/enix/kube-image-keeper + # -- Proxy image pull policy + pullPolicy: IfNotPresent + # -- Proxy image tag. 
Default chart appVersion + tag: "" + # -- whether to run the proxy daemonset in hostNetwork mode + hostNetwork: false + # -- hostPort used for the proxy pod + hostPort: 7439 + # -- hostIp used for the proxy pod + hostIp: "127.0.0.1" + # -- metricsPort used for the proxy pod (to expose prometheus metrics) + metricsPort: 8080 + # -- Verbosity level for the proxy pod + verbosity: 1 + # -- Specify secrets to be used when pulling proxy image + imagePullSecrets: [] + # -- Annotations to add to the proxy pod + podAnnotations: {} + # -- Security context for the proxy pod + podSecurityContext: {} + # -- Security context for containers of the proxy pod + securityContext: {} + # -- Node selector for the proxy pod + nodeSelector: {} + # -- Toleration for the proxy pod + tolerations: + - effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/disk-pressure + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/memory-pressure + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/pid-pressure + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/unschedulable + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/network-unavailable + operator: Exists + # -- Set the PriorityClassName for the proxy pod + priorityClassName: system-node-critical + # -- Affinity for the proxy pod + affinity: {} + # -- Extra env variables for the proxy pod + env: [] + # -- Readiness probe definition for the proxy pod + readinessProbe: + httpGet: + path: /readyz + port: 7439 + # -- Liveness probe definition for the proxy pod + livenessProbe: + httpGet: + path: /healthz + port: 7439 + resources: + requests: + # -- Cpu requests for the proxy pod + cpu: "50m" + # -- Memory requests 
for the proxy pod + memory: "50Mi" + limits: + # -- Cpu limits for the proxy pod + cpu: "1" + # -- Memory limits for the proxy pod + memory: "512Mi" + podMonitor: + # -- Should a PodMonitor object be installed to scrape kuik proxy metrics. For prometheus-operator (kube-prometheus) users. + create: false + # -- Target scrape interval set in the PodMonitor + scrapeInterval: 60s + # -- Target scrape timeout set in the PodMonitor + scrapeTimeout: 30s + # -- Additional labels to add to PodMonitor objects + extraLabels: {} + # -- Relabel config for the PodMonitor, see: https://coreos.com/operators/prometheus/docs/latest/api.html#relabelconfig + relabelings: [] + kubeApiRateLimits: {} + # -- Try higher values if there's a lot of CRDs installed in the cluster and proxy start takes a long time because of throttling + # qps: 5 + # burst: 10 + +registry: + image: + # -- Registry image repository + repository: registry + # -- Registry image pull policy + pullPolicy: IfNotPresent + # -- Registry image tag + tag: "2.8" + # -- Number of replicas for the registry pod + replicas: 1 + persistence: + # -- AccessMode for persistent volume + accessModes: ReadWriteOnce + # -- If true, enable persistent storage (ignored when using minio or S3) + enabled: false + # -- StorageClass for persistent volume + storageClass: null + # -- Registry persistent volume size + size: 20Gi + # -- External S3 configuration (needed only if you don't enable minio) (see https://github.com/docker/docs/blob/main/registry/storage-drivers/s3.md) + s3: {} + s3ExistingSecret: "" + # -- Disable blobs redirection to S3 bucket (useful if your S3 instance is not accessible from kubelet) + disableS3Redirections: false + garbageCollection: + # -- Garbage collector cron schedule. Use standard crontab format. + schedule: "0 0 * * 0" + # -- If true, delete untagged manifests. Default to false since there is a known bug in **docker distribution** garbage collect job. 
+ deleteUntagged: false + image: + # -- Cronjob image repository + repository: bitnami/kubectl + # -- Cronjob image pull policy + pullPolicy: IfNotPresent + # -- Cronjob image tag. Default 'latest' + tag: "latest" + service: + # -- Registry service type + type: ClusterIP + # -- A secret used to sign state that may be stored with the client to protect against tampering, generated if empty (see https://github.com/distribution/distribution/blob/main/docs/configuration.md#http) + httpSecret: "" + # -- Extra env variables for the registry pod + env: [] + # -- Readiness probe definition for the registry pod + readinessProbe: + httpGet: + path: /v2/ + port: 5000 + # -- Liveness probe definition for the registry pod + livenessProbe: + httpGet: + path: /v2/ + port: 5000 + resources: + requests: + # -- Cpu requests for the registry pod + cpu: "50m" + # -- Memory requests for the registry pod + memory: "256Mi" + limits: + # -- Cpu limits for the registry pod + cpu: "1" + # -- Memory limits for the registry pod + memory: "1Gi" + # -- Specify secrets to be used when pulling registry image + imagePullSecrets: [] + # -- Annotations to add to the registry pod + podAnnotations: {} + # -- Security context for the registry pod + podSecurityContext: {} + # -- Security context for containers of the registry pod + securityContext: {} + # -- Node selector for the registry pod + nodeSelector: {} + # -- Toleration for the registry pod + tolerations: [] + # -- Set the PriorityClassName for the registry pod + priorityClassName: "" + # -- Affinity for the registry pod + affinity: {} + pdb: + # -- Create a PodDisruptionBudget for the registry + create: false + # -- Minimum available pods + minAvailable: 1 + # -- Maximum unavailable pods + maxUnavailable: "" + serviceMonitor: + # -- Should a ServiceMonitor object be installed to scrape kuik registry metrics. For prometheus-operator (kube-prometheus) users. 
+ create: false + # -- Target scrape interval set in the ServiceMonitor + scrapeInterval: 60s + # -- Target scrape timeout set in the ServiceMonitor + scrapeTimeout: 30s + # -- Additional labels to add to ServiceMonitor objects + extraLabels: {} + # -- Relabel config for the ServiceMonitor, see: https://coreos.com/operators/prometheus/docs/latest/api.html#relabelconfig + relabelings: [] + serviceAccount: + # -- Annotations to add to the serviceAccount + annotations: {} + +registryUI: + # -- If true, enable the registry user interface + enabled: false + image: + # -- Registry UI image repository + repository: parabuzzle/craneoperator + # -- Registry UI image pull policy + pullPolicy: IfNotPresent + # -- Registry UI image tag + tag: "2.2.5" + auth: + # -- Registry UI username + username: "admin" + # -- Registry UI password + password: "" + # -- CPU / Memory resources requests / limits for the registry UI pod + resources: {} + # -- Specify secrets to be used when pulling registry UI image + imagePullSecrets: [] + # -- Annotations to add to the registry UI pod + podAnnotations: {} + # -- Security context for the registry UI pod + podSecurityContext: {} + # -- Security context for containers of the registry UI pod + securityContext: {} + # -- Node selector for the registry UI pod + nodeSelector: {} + # -- Toleration for the registry UI pod + tolerations: [] + # -- Affinity for the registry UI pod + affinity: {} + +minio: + # -- If true, install minio as a local storage backend for the registry + enabled: false + fullnameOverride: "kube-image-keeper-minio" + mode: distributed + provisioning: + enabled: true + buckets: + - name: registry + usersExistingSecrets: + - kube-image-keeper-minio-registry-users + extraVolumes: + - name: registry-keys + secret: + defaultMode: 420 + secretName: kube-image-keeper-s3-registry-keys + extraVolumeMounts: + - name: registry-keys + mountPath: /opt/bitnami/minio/svcacct/registry/ + extraCommands: + - | + (mc admin user svcacct info 
provisioning $(cat /opt/bitnami/minio/svcacct/registry/accessKey) 2> /dev/null || + mc admin user svcacct add + --access-key "$(cat /opt/bitnami/minio/svcacct/registry/accessKey)" + --secret-key "$(cat /opt/bitnami/minio/svcacct/registry/secretKey)" + provisioning registry) > /dev/null + +serviceAccount: + # -- Annotations to add to the serviceAccount + annotations: {} + # -- Name of the serviceAccount + name: "" + +psp: + # -- If true, create the PodSecurityPolicy + create: false diff --git a/vendored-charts b/vendored-charts index 050e5dfe0..7fd8c0ac4 100644 --- a/vendored-charts +++ b/vendored-charts @@ -25,3 +25,4 @@ addons/sagemaker-chart addons/sfn-chart addons/sns-chart addons/sqs-chart +addons/keda-http-add-on \ No newline at end of file