diff --git a/infrastructure/base/gapi/platform-public-gateway.yaml b/infrastructure/base/gapi/example-public-gateway.yaml
similarity index 81%
rename from infrastructure/base/gapi/platform-public-gateway.yaml
rename to infrastructure/base/gapi/example-public-gateway.yaml
index 3e17d926..e77b6320 100644
--- a/infrastructure/base/gapi/platform-public-gateway.yaml
+++ b/infrastructure/base/gapi/example-public-gateway.yaml
@@ -1,3 +1,6 @@
+# This manifest is not deployed; it is only an example of how to create a public gateway for the platform.
+# It relies on cert-manager to provision a certificate for the gateway, which is then referenced in the gateway spec.
+
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
@@ -12,7 +15,6 @@ spec:
service.beta.kubernetes.io/aws-load-balancer-name: "ogenki-platform-public-gateway"
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance
service.beta.kubernetes.io/aws-load-balancer-type: "external"
- external-dns.alpha.kubernetes.io/hostname: arc-webhook.${domain_name}
listeners:
- name: http
hostname: "*.${domain_name}"
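For reference, the certificate this example gateway points at can be provisioned with a cert-manager Certificate. A minimal sketch follows, mirroring the other manifests in this change; the resource name, the wildcard dnsNames entry and the letsencrypt-prod ClusterIssuer are assumptions, not part of the diff. Note that a wildcard certificate from Let's Encrypt also requires a DNS-01 solver on the ClusterIssuer.

# Sketch only: cert-manager Certificate the example public gateway could reference.
# Name, namespace and issuer are assumptions; adjust to the actual platform setup.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-public-gateway-certificate
  namespace: infrastructure
spec:
  secretName: example-public-gateway-certificate
  duration: 2160h # 90d
  renewBefore: 360h # 15d
  dnsNames:
    - "*.${domain_name}"
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
    group: cert-manager.io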
diff --git a/infrastructure/base/gapi/kustomization.yaml b/infrastructure/base/gapi/kustomization.yaml
index 3099c101..27a2d888 100644
--- a/infrastructure/base/gapi/kustomization.yaml
+++ b/infrastructure/base/gapi/kustomization.yaml
@@ -5,4 +5,3 @@ namespace: infrastructure
resources:
- platform-private-gateway.yaml
- platform-private-gateway-certificate.yaml
- - platform-public-gateway.yaml
diff --git a/observability/base/grafana-operator/helmrelease.yaml b/observability/base/grafana-operator/helmrelease.yaml
index 8c796c32..335617c1 100644
--- a/observability/base/grafana-operator/helmrelease.yaml
+++ b/observability/base/grafana-operator/helmrelease.yaml
@@ -23,7 +23,7 @@ spec:
values:
resources:
limits:
- cpu: 100m
+ cpu: 500m
memory: 100Mi
requests:
cpu: 100m
diff --git a/observability/base/victoria-metrics-k8s-stack/helmrelease-vmsingle.yaml b/observability/base/victoria-metrics-k8s-stack/helmrelease-vmsingle.yaml
index 39c5eff8..9d7ace8c 100644
--- a/observability/base/victoria-metrics-k8s-stack/helmrelease-vmsingle.yaml
+++ b/observability/base/victoria-metrics-k8s-stack/helmrelease-vmsingle.yaml
@@ -35,3 +35,20 @@ spec:
storage: 10Gi
extraArgs:
maxLabelsPerTimeseries: "50"
+ # Todo authentication with Zitadel. Currently using admin user
+ # grafana:
+ # grafana.ini:
+ # server:
+ # root_url: "https://grafana.priv.${domain_name}"
+ # domain: "grafana.priv.${domain_name}"
+  #     auth.generic_oauth:
+ # enabled: true
+ # name: "Zitadel"
+ # allow_sign_up: true
+ # client_id: "293437355073802541"
+ # client_secret: "3XPQdOtQedxEnAjaTbxsnQ2Fc0WT15rKU5nsgSWYzgktdPHm82whbzfu01J0c0ba"
+ # scopes: "openid profile email"
+ # auth_url: "https://auth.${domain_name}/oauth/v2/authorize"
+ # token_url: "https://auth.${domain_name}/oauth/v2/token"
+ # api_url: "https://auth.${domain_name}/oidc/v1/userinfo"
+ # # role_attribute_path: "contains(groups[*], 'admin-group') && 'Admin' || 'Viewer'"
diff --git a/security/base/cert-manager/vault-clusterissuer.yaml b/security/base/cert-manager/vault-clusterissuer.yaml
index 2cfc2829..1faf8be3 100644
--- a/security/base/cert-manager/vault-clusterissuer.yaml
+++ b/security/base/cert-manager/vault-clusterissuer.yaml
@@ -11,7 +11,7 @@ spec:
auth:
appRole:
path: approle
- roleId: cbfb2f59-f08f-fee6-e364-be12ff4b4a9f # !! This value changes each time I recreate the whole platform
+ roleId: a8588869-b29e-8190-47cb-23c4cf3c2130 # !! This value changes each time I recreate the whole platform
secretRef:
name: cert-manager-vault-approle
key: secret_id
diff --git a/security/base/zitadel/certificate.yaml b/security/base/zitadel/certificate.yaml
index cd6ffe90..94249ab7 100644
--- a/security/base/zitadel/certificate.yaml
+++ b/security/base/zitadel/certificate.yaml
@@ -6,11 +6,10 @@ spec:
secretName: zitadel-certificate
duration: 2160h # 90d
renewBefore: 360h # 15d
- commonName: zitadel.priv.${domain_name}
+ commonName: auth.${domain_name}
dnsNames:
- - zitadel.priv.${domain_name}
- - sso.priv.${domain_name}
+ - auth.${domain_name}
issuerRef:
- name: vault
+ name: letsencrypt-prod
kind: ClusterIssuer
group: cert-manager.io
diff --git a/security/base/zitadel/gateway.yaml b/security/base/zitadel/gateway.yaml
index edcaa710..051a2535 100644
--- a/security/base/zitadel/gateway.yaml
+++ b/security/base/zitadel/gateway.yaml
@@ -8,12 +8,12 @@ spec:
annotations:
service.beta.kubernetes.io/aws-load-balancer-name: "ogenki-zitadel-gateway"
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance
- service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
+ service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing"
service.beta.kubernetes.io/aws-load-balancer-type: "external"
- external-dns.alpha.kubernetes.io/hostname: "zitadel.priv.${domain_name},sso.priv.${domain_name}"
+ external-dns.alpha.kubernetes.io/hostname: "auth.${domain_name}"
listeners:
- - name: http
- hostname: "*.priv.${domain_name}"
+ - name: auth
+ hostname: "auth.${domain_name}"
port: 443
protocol: TLS
allowedRoutes:
diff --git a/security/base/zitadel/helmrelease.yaml b/security/base/zitadel/helmrelease.yaml
index 2a818ffa..82c31ea4 100644
--- a/security/base/zitadel/helmrelease.yaml
+++ b/security/base/zitadel/helmrelease.yaml
@@ -4,6 +4,7 @@ metadata:
name: zitadel
spec:
interval: 30m
+ timeout: 30m
driftDetection:
mode: enabled
chart:
@@ -27,7 +28,7 @@ spec:
Format: json
ExternalPort: 443
ExternalSecure: true
- ExternalDomain: "zitadel.priv.${domain_name}"
+ ExternalDomain: "auth.${domain_name}"
TLS:
Enabled: true
KeyPath: /tls/tls.key
diff --git a/security/base/zitadel/network-policy.yaml b/security/base/zitadel/network-policy.yaml
index 8f36ad90..4475db5a 100644
--- a/security/base/zitadel/network-policy.yaml
+++ b/security/base/zitadel/network-policy.yaml
@@ -3,7 +3,7 @@ kind: CiliumNetworkPolicy
metadata:
name: zitadel
spec:
- description: "Allow internal traffic to the Zitadel service."
+ description: "Limit traffic to and from the Zitadel application"
endpointSelector:
matchLabels:
k8s:app.kubernetes.io/name: zitadel
@@ -22,8 +22,6 @@ spec:
- world
toPorts:
- ports:
- - port: "80"
- protocol: TCP
- port: "443"
protocol: TCP
- toEndpoints:
diff --git a/security/base/zitadel/sqlinstance.yaml b/security/base/zitadel/sqlinstance.yaml
index bc550910..6d533d3d 100644
--- a/security/base/zitadel/sqlinstance.yaml
+++ b/security/base/zitadel/sqlinstance.yaml
@@ -10,7 +10,7 @@ spec:
createSuperuser: true
objectStoreRecovery:
bucketName: "eu-west-3-ogenki-cnpg-backups"
- path: "zitadel-20241109"
+ path: "zitadel-20241111"
backup:
schedule: "0 0 * * *"
bucketName: "eu-west-3-ogenki-cnpg-backups"
diff --git a/security/base/zitadel/tlsroute.yaml b/security/base/zitadel/tlsroute.yaml
index 14426e93..91d4fc83 100644
--- a/security/base/zitadel/tlsroute.yaml
+++ b/security/base/zitadel/tlsroute.yaml
@@ -6,7 +6,7 @@ spec:
parentRefs:
- name: zitadel
hostnames:
- - "zitadel.priv.${domain_name}"
+ - "auth.${domain_name}"
rules:
- backendRefs:
- name: zitadel
diff --git a/terraform/eks/README.md b/terraform/eks/README.md
index 3454ba07..a100218c 100644
--- a/terraform/eks/README.md
+++ b/terraform/eks/README.md
@@ -38,6 +38,17 @@ karpenter_limits = {
memory = "64Gi"
}
}
+
+# Optional: configure an external OIDC provider to authenticate cluster users
+cluster_identity_providers = {
+ zitadel = {
+ client_id = "702vqsrjicklgb7c5b7b50i1gc"
+ issuer_url = "https://auth.cloud.ogenki.io"
+ username_claim = "email"
+ groups_claim = "groups"
+ }
+}
+
```
3. Apply with
@@ -106,8 +117,8 @@ tofu destroy --var-file variables.tfvars
| Name | Source | Version |
|------|--------|---------|
| [eks](#module\_eks) | terraform-aws-modules/eks/aws | ~> 20 |
-| [irsa\_crossplane](#module\_irsa\_crossplane) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.47.1 |
-| [irsa\_ebs\_csi\_driver](#module\_irsa\_ebs\_csi\_driver) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.47.1 |
+| [irsa\_crossplane](#module\_irsa\_crossplane) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.48.0 |
+| [irsa\_ebs\_csi\_driver](#module\_irsa\_ebs\_csi\_driver) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | 5.48.0 |
| [karpenter](#module\_karpenter) | terraform-aws-modules/eks/aws//modules/karpenter | ~> 20.0 |
## Resources
@@ -147,6 +158,7 @@ tofu destroy --var-file variables.tfvars
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| [cilium\_version](#input\_cilium\_version) | Cilium cluster version | `string` | `"1.16.2"` | no |
+| [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. | `any` | `{}` | no |
| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster to be created | `string` | n/a | yes |
| [cluster\_version](#input\_cluster\_version) | k8s cluster version | `string` | `"1.31"` | no |
| [ebs\_csi\_driver\_chart\_version](#input\_ebs\_csi\_driver\_chart\_version) | EBS CSI Driver Helm chart version | `string` | `"2.25.0"` | no |
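To show how a user would log in against the cluster_identity_providers entry documented above, the sketch below is a kubeconfig user stanza driving the kubelogin (kubectl oidc-login) plugin. The client ID and issuer URL are copied from the README example; the user name and the extra scopes (matching the username_claim and groups_claim set above) are assumptions.

# Sketch only: kubeconfig user entry for OIDC authentication via kubelogin.
users:
  - name: zitadel-oidc
    user:
      exec:
        apiVersion: client.authentication.k8s.io/v1beta1
        interactiveMode: IfAvailable
        command: kubectl
        args:
          - oidc-login
          - get-token
          - --oidc-issuer-url=https://auth.cloud.ogenki.io
          - --oidc-client-id=702vqsrjicklgb7c5b7b50i1gc
          - --oidc-extra-scope=email
          - --oidc-extra-scope=groups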
diff --git a/terraform/eks/iam.tf b/terraform/eks/iam.tf
index 06795e9a..55d3aa10 100644
--- a/terraform/eks/iam.tf
+++ b/terraform/eks/iam.tf
@@ -1,7 +1,7 @@
# AWS permissions for the EBS-CSI-DRIVER
module "irsa_ebs_csi_driver" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
- version = "5.47.1"
+ version = "5.48.0"
role_name = "${var.cluster_name}-ebs_csi_driver"
assume_role_condition_test = "StringLike"
@@ -22,7 +22,7 @@ module "irsa_ebs_csi_driver" {
# AWS permissions for Crossplane
module "irsa_crossplane" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
- version = "5.47.1"
+ version = "5.48.0"
role_name = "${var.cluster_name}-crossplane"
assume_role_condition_test = "StringLike"
@@ -64,6 +64,7 @@ resource "aws_iam_policy" "crossplane_iam" {
"iam:CreatePolicyVersion",
"iam:PutRolePolicy",
"iam:DeletePolicy",
+ "iam:DeletePolicyVersion",
"iam:DeleteRole",
"iam:DetachRolePolicy",
"iam:AttachRolePolicy",
diff --git a/terraform/eks/main.tf b/terraform/eks/main.tf
index 5d4e7101..4b3c8af9 100644
--- a/terraform/eks/main.tf
+++ b/terraform/eks/main.tf
@@ -45,6 +45,8 @@ module "eks" {
# }
#}
+ cluster_identity_providers = var.cluster_identity_providers
+
vpc_id = data.aws_vpc.selected.id
subnet_ids = data.aws_subnets.private.ids
control_plane_subnet_ids = data.aws_subnets.intra.ids
diff --git a/terraform/eks/variables.tf b/terraform/eks/variables.tf
index e308320b..cb6150dc 100644
--- a/terraform/eks/variables.tf
+++ b/terraform/eks/variables.tf
@@ -33,6 +33,12 @@ variable "iam_role_additional_policies" {
default = {}
}
+variable "cluster_identity_providers" {
+ description = "Map of cluster identity provider configurations to enable for the cluster."
+ type = any
+ default = {}
+}
+
variable "cilium_version" {
description = "Cilium cluster version"
default = "1.16.2"
diff --git a/tooling/base/harbor/sqlinstance.yaml b/tooling/base/harbor/sqlinstance.yaml
index 188b1af1..8cb34ffe 100644
--- a/tooling/base/harbor/sqlinstance.yaml
+++ b/tooling/base/harbor/sqlinstance.yaml
@@ -5,11 +5,14 @@ metadata:
spec:
size: "small"
storageGB: 20
+ databases:
+ - name: registry
+ owner: harbor
cnpg:
instances: 1
objectStoreRecovery:
bucketName: "eu-west-3-ogenki-cnpg-backups"
- path: "harbor-20241109"
+ path: "harbor-20241111"
backup:
schedule: "0 1 * * *"
bucketName: "eu-west-3-ogenki-cnpg-backups"
diff --git a/tooling/base/headlamp/externalsecret-zitadel-envvars.yaml b/tooling/base/headlamp/externalsecret-zitadel-envvars.yaml
new file mode 100644
index 00000000..99d8cd79
--- /dev/null
+++ b/tooling/base/headlamp/externalsecret-zitadel-envvars.yaml
@@ -0,0 +1,17 @@
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+ name: headlamp-envvars
+spec:
+ dataFrom:
+ - extract:
+ conversionStrategy: Default
+ key: headlamp/envvars
+ refreshInterval: 20m
+ secretStoreRef:
+ kind: ClusterSecretStore
+ name: clustersecretstore
+ target:
+ creationPolicy: Owner
+ deletionPolicy: Retain
+ name: headlamp-envvars
diff --git a/tooling/base/headlamp/helmrelease.yaml b/tooling/base/headlamp/helmrelease.yaml
index a4a6f6a1..8c7393f9 100644
--- a/tooling/base/headlamp/helmrelease.yaml
+++ b/tooling/base/headlamp/helmrelease.yaml
@@ -17,6 +17,12 @@ spec:
values:
config:
pluginsDir: /build/plugins
+ oidc:
+ secret:
+ create: false
+ externalSecret:
+ enabled: true
+ name: "headlamp-envvars"
initContainers:
- command:
- /bin/sh
diff --git a/tooling/base/headlamp/kustomization.yaml b/tooling/base/headlamp/kustomization.yaml
index a7816fb9..8ed3537b 100644
--- a/tooling/base/headlamp/kustomization.yaml
+++ b/tooling/base/headlamp/kustomization.yaml
@@ -2,5 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: tooling
resources:
+ - externalsecret-zitadel-envvars.yaml
- httproute.yaml
- helmrelease.yaml
+ - rbac-admin.yaml
diff --git a/tooling/base/headlamp/rbac-admin.yaml b/tooling/base/headlamp/rbac-admin.yaml
new file mode 100644
index 00000000..15c574fb
--- /dev/null
+++ b/tooling/base/headlamp/rbac-admin.yaml
@@ -0,0 +1,13 @@
+# Grants my user full cluster permissions. Looking for a way to bind to a group (e.g. Google Groups) instead of a single user.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: admin-user-clusterrolebinding
+subjects:
+ - kind: User
+ name: smaine.kahlouch@ogenki.io
+ apiGroup: rbac.authorization.k8s.io
+roleRef:
+ kind: ClusterRole
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
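As a possible follow-up to the comment in rbac-admin.yaml, binding to a group rather than a single user could look like the sketch below. The group name admin-group is a placeholder; it would have to match a value delivered in the token's groups claim (the groups_claim configured in the EKS identity provider above).

# Sketch only: grant cluster-admin to an OIDC group instead of an individual user.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-group-clusterrolebinding
subjects:
  - kind: Group
    name: admin-group # placeholder, must match a group from the OIDC groups claim
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io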