diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres-editor-clusterrole_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres-editor-clusterrole_rbac.authorization.k8s.io_v1_clusterrole.yaml
new file mode 100644
index 00000000000..b85e527f786
--- /dev/null
+++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres-editor-clusterrole_rbac.authorization.k8s.io_v1_clusterrole.yaml
@@ -0,0 +1,25 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: stackgres
+    app.kubernetes.io/instance: editor-clusterrole
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/name: clusterrole
+    app.kubernetes.io/part-of: stackgres
+  name: stackgres-editor-clusterrole
+rules:
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - '*'
+    verbs:
+      - create
+      - delete
+      - get
+      - list
+      - patch
+      - update
+      - watch
diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres-editor-role_rbac.authorization.k8s.io_v1_role.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres-editor-role_rbac.authorization.k8s.io_v1_role.yaml
new file mode 100644
index 00000000000..ade4f3c625b
--- /dev/null
+++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres-editor-role_rbac.authorization.k8s.io_v1_role.yaml
@@ -0,0 +1,25 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: stackgres
+    app.kubernetes.io/instance: editor-role
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/name: role
+    app.kubernetes.io/part-of: stackgres
+  name: stackgres-editor-role
+rules:
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - '*'
+    verbs:
+      - create
+      - delete
+      - get
+      - list
+      - patch
+      - update
+      - watch
diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres-operator_v1_service.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres-operator_v1_service.yaml
new file mode 100644
index 00000000000..64145f5c607
--- /dev/null
+++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres-operator_v1_service.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  name: stackgres-operator
+spec:
+  ports:
+    - port: 443
+      targetPort: 8443
+  selector:
+    app: stackgres-operator
+status:
+  loadBalancer: {}
diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres-viewer-clusterrole_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres-viewer-clusterrole_rbac.authorization.k8s.io_v1_clusterrole.yaml
new file mode 100644
index 00000000000..dcaf582ffa1
--- /dev/null
+++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres-viewer-clusterrole_rbac.authorization.k8s.io_v1_clusterrole.yaml
@@ -0,0 +1,27 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: stackgres
+    app.kubernetes.io/instance: viewer-clusterrole
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/name: clusterrole
+    app.kubernetes.io/part-of: stackgres
+  name: stackgres-viewer-clusterrole
+rules:
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - '*'
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - sgconfigs/status
+    verbs:
+      - get
diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres-viewer-role_rbac.authorization.k8s.io_v1_role.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres-viewer-role_rbac.authorization.k8s.io_v1_role.yaml
new file mode 100644
index 00000000000..26ecf4efa34
--- /dev/null
+++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres-viewer-role_rbac.authorization.k8s.io_v1_role.yaml
@@ -0,0 +1,27 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: stackgres
+    app.kubernetes.io/instance: viewer-role
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/name: role
+    app.kubernetes.io/part-of: stackgres
+  name: stackgres-viewer-role
+rules:
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - '*'
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - sgconfigs/status
+    verbs:
+      - get
diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-admin_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-admin_rbac.authorization.k8s.io_v1_clusterrole.yaml
new file mode 100644
index 00000000000..ea602562f8d
--- /dev/null
+++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-admin_rbac.authorization.k8s.io_v1_clusterrole.yaml
@@ -0,0 +1,153 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: stackgres-webconsole-admin
+rules:
+  - apiGroups:
+      - rbac.authorization.k8s.io
+    resources:
+      - clusterroles
+      - clusterrolebindings
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
+  - apiGroups:
+      - apiextensions.k8s.io
+    resourceNames:
+      - sgconfigs.stackgres.io
+      - sgclusters.stackgres.io
+      - sginstanceprofiles.stackgres.io
+      - sgpgconfigs.stackgres.io
+      - sgpoolconfigs.stackgres.io
+      - sgbackups.stackgres.io
+      - sgbackupconfigs.stackgres.io
+      - sgobjectstorages.stackgres.io
+      - sgdbops.stackgres.io
+      - sgdistributedlogs.stackgres.io
+      - sgshardedclusters.stackgres.io
+      - sgscripts.stackgres.io
+      - sgstreams.stackgres.io
+    resources:
+      - customresourcedefinitions
+    verbs:
+      - get
+  - apiGroups:
+      - apiextensions.k8s.io
+    resources:
+      - customresourcedefinitions
+    verbs:
+      - list
+  - apiGroups:
+      - ''
+      - storage.k8s.io
+    resources:
+      - namespaces
+      - storageclasses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ''
+      - batch
+    resources:
+      - pods/exec
+      - configmaps
+      - secrets
+      - jobs
+    verbs:
+      - create
+  - apiGroups:
+      - ''
+    resources:
+      - pods/exec
+    verbs:
+      - get
+  - apiGroups:
+      - ''
+      - batch
+    resources:
+      - configmaps
+      - secrets
+      - jobs
+    verbs:
+      - delete
+  - apiGroups:
+      - ''
+    resources:
+      - configmaps
+      - secrets
+    verbs:
+      - patch
+      - update
+  - apiGroups:
+      - ''
+      - batch
+      - storage.k8s.io
+    resources:
+      - pods
+      - services
+      - configmaps
+      - secrets
+      - persistentvolumes
+      - persistentvolumeclaims
+      - events
+      - jobs
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - sgclusters
+      - sgpgconfigs
+      - sginstanceprofiles
+      - sgpoolconfigs
+      - sgbackupconfigs
+      - sgbackups
+      - sgdistributedlogs
+      - sgdbops
+      - sgobjectstorages
+      - sgscripts
+      - sgshardedclusters
+      - sgshardedbackups
+      - sgshardeddbops
+      - sgstreams
+    verbs:
+      - create
+      - watch
+      - list
+      - get
+      - update
+      - patch
+      - delete
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - sgconfigs
+    verbs:
+      - watch
+      - list
+      - get
+      - update
+      - patch
+  - apiGroups:
+      - rbac.authorization.k8s.io
+    resources:
+      - roles
+      - rolebindings
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-admin_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-admin_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml
new file mode 100644
index 00000000000..377736c2329
--- /dev/null
+++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-admin_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml
@@ -0,0 +1,19 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: stackgres
+    app.kubernetes.io/instance: webconsole-admin-clusterrolebinding
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/name: clusterrolebinding
+    app.kubernetes.io/part-of: stackgres
+  name: stackgres-webconsole-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: stackgres-webconsole-admin
+subjects:
+  - kind: User
+    name: admin
diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml
new file mode 100644
index 00000000000..8b36563d909
--- /dev/null
+++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml
@@ -0,0 +1,101 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: stackgres-webconsole-reader
+rules:
+  - apiGroups:
+      - apiextensions.k8s.io
+    resourceNames:
+      - sgconfigs.stackgres.io
+      - sgclusters.stackgres.io
+      - sginstanceprofiles.stackgres.io
+      - sgpgconfigs.stackgres.io
+      - sgpoolconfigs.stackgres.io
+      - sgbackups.stackgres.io
+      - sgbackupconfigs.stackgres.io
+      - sgobjectstorages.stackgres.io
+      - sgdbops.stackgres.io
+      - sgdistributedlogs.stackgres.io
+      - sgshardedclusters.stackgres.io
+      - sgscripts.stackgres.io
+      - sgstreams.stackgres.io
+    resources:
+      - customresourcedefinitions
+    verbs:
+      - get
+  - apiGroups:
+      - apiextensions.k8s.io
+    resources:
+      - customresourcedefinitions
+    verbs:
+      - list
+  - apiGroups:
+      - ''
+      - storage.k8s.io
+    resources:
+      - namespaces
+      - storageclasses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ''
+      - batch
+    resources:
+      - pods/exec
+    verbs:
+      - create
+  - apiGroups:
+      - ''
+    resources:
+      - pods/exec
+    verbs:
+      - get
+  - apiGroups:
+      - ''
+      - batch
+      - storage.k8s.io
+    resources:
+      - pods
+      - services
+      - configmaps
+      - secrets
+      - persistentvolumes
+      - persistentvolumeclaims
+      - events
+      - jobs
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - sgclusters
+      - sgpgconfigs
+      - sginstanceprofiles
+      - sgpoolconfigs
+      - sgbackupconfigs
+      - sgbackups
+      - sgdistributedlogs
+      - sgdbops
+      - sgobjectstorages
+      - sgscripts
+      - sgshardedclusters
+      - sgshardedbackups
+      - sgshardeddbops
+      - sgstreams
+    verbs:
+      - watch
+      - list
+      - get
+  - apiGroups:
+      - stackgres.io
+    resources:
+      - sgconfigs
+    verbs:
+      - watch
+      - list
+      - get
diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-writer_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-writer_rbac.authorization.k8s.io_v1_clusterrole.yaml
new file mode
100644 index 00000000000..ae7ec8ee3ef --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres-webconsole-writer_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,127 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: stackgres-webconsole-writer +rules: + - apiGroups: + - apiextensions.k8s.io + resourceNames: + - sgconfigs.stackgres.io + - sgclusters.stackgres.io + - sginstanceprofiles.stackgres.io + - sgpgconfigs.stackgres.io + - sgpoolconfigs.stackgres.io + - sgbackups.stackgres.io + - sgbackupconfigs.stackgres.io + - sgobjectstorages.stackgres.io + - sgdbops.stackgres.io + - sgdistributedlogs.stackgres.io + - sgshardedclusters.stackgres.io + - sgscripts.stackgres.io + - sgstreams.stackgres.io + resources: + - customresourcedefinitions + verbs: + - get + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - apiGroups: + - '' + - storage.k8s.io + resources: + - namespaces + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - '' + - batch + resources: + - pods/exec + - configmaps + - secrets + - jobs + verbs: + - create + - apiGroups: + - '' + resources: + - pods/exec + verbs: + - get + - apiGroups: + - '' + - batch + resources: + - configmaps + - secrets + - jobs + verbs: + - delete + - apiGroups: + - '' + resources: + - configmaps + - secrets + verbs: + - patch + - update + - apiGroups: + - '' + - batch + - storage.k8s.io + resources: + - pods + - services + - configmaps + - secrets + - persistentvolumes + - persistentvolumeclaims + - events + - jobs + verbs: + - get + - list + - watch + - apiGroups: + - stackgres.io + resources: + - sgclusters + - sgpgconfigs + - sginstanceprofiles + - sgpoolconfigs + - sgbackupconfigs + - sgbackups + - sgdistributedlogs + - sgdbops + - sgobjectstorages + - sgscripts + - sgshardedclusters + - sgshardedbackups + - sgshardeddbops + - sgstreams + verbs: + - create + - watch + - list + - get + - update + - patch + - delete + - apiGroups: + - stackgres.io + resources: + - sgconfigs + verbs: + - watch + - list + - get + - update + - patch diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.clusterserviceversion.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.clusterserviceversion.yaml new file mode 100644 index 00000000000..d8dbe0b8b0c --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.clusterserviceversion.yaml @@ -0,0 +1,16590 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: "[\n {\n \"apiVersion\": \"stackgres.io/v1\",\n \"kind\"\ + : \"SGBackup\",\n \"metadata\": {\n \"name\": \"demo-backup\",\n \ + \ \"namespace\": \"demo-db\"\n },\n \"spec\": {\n \"managedLifecycle\"\ + : false,\n \"sgCluster\": \"demo-db\"\n }\n },\n {\n \"apiVersion\"\ + : \"stackgres.io/v1\",\n \"kind\": \"SGCluster\",\n \"metadata\": {\n\ + \ \"name\": \"demo-db\",\n \"namespace\": \"demo-db\"\n },\n \ + \ \"spec\": {\n \"instances\": 3,\n \"pods\": {\n \"persistentVolume\"\ + : {\n \"size\": \"20Gi\"\n }\n },\n \"postgres\":\ + \ {\n \"version\": \"latest\"\n }\n }\n },\n {\n \"apiVersion\"\ + : \"stackgres.io/v1\",\n \"kind\": \"SGConfig\",\n \"metadata\": {\n \ + \ \"name\": \"stackgres-operator\"\n },\n \"spec\": {\n \"authentication\"\ + : {\n \"type\": \"jwt\",\n \"user\": \"admin\"\n },\n \ + \ \"containerRegistry\": \"quay.io\",\n \"extensions\": {\n \"\ + repositoryUrls\": [\n 
\"https://extensions.stackgres.io/postgres/repository\"\ + \n ]\n },\n \"grafana\": {\n \"autoEmbed\": false,\n\ + \ \"datasourceName\": \"Prometheus\",\n \"password\": \"prom-operator\"\ + ,\n \"schema\": \"http\",\n \"user\": \"admin\"\n },\n \ + \ \"imagePullPolicy\": \"IfNotPresent\",\n \"prometheus\": {\n \ + \ \"allowAutobind\": true\n }\n }\n },\n {\n \"apiVersion\":\ + \ \"stackgres.io/v1\",\n \"kind\": \"SGDbOps\",\n \"metadata\": {\n \ + \ \"name\": \"demo-restart-op\",\n \"namespace\": \"demo-db\"\n },\n\ + \ \"spec\": {\n \"op\": \"restart\",\n \"sgCluster\": \"demo-db\"\ + \n }\n },\n {\n \"apiVersion\": \"stackgres.io/v1\",\n \"kind\":\ + \ \"SGDistributedLogs\",\n \"metadata\": {\n \"name\": \"distributedlogs\"\ + ,\n \"namespace\": \"demo-db\"\n },\n \"spec\": {\n \"persistentVolume\"\ + : {\n \"size\": \"20Gi\"\n }\n }\n },\n {\n \"apiVersion\"\ + : \"stackgres.io/v1\",\n \"kind\": \"SGInstanceProfile\",\n \"metadata\"\ + : {\n \"name\": \"size-m\",\n \"namespace\": \"demo-db\"\n },\n\ + \ \"spec\": {\n \"cpu\": \"4\",\n \"memory\": \"8Gi\"\n }\n\ + \ },\n {\n \"apiVersion\": \"stackgres.io/v1\",\n \"kind\": \"SGPoolingConfig\"\ + ,\n \"metadata\": {\n \"name\": \"poolconfig\",\n \"namespace\"\ + : \"demo-db\"\n },\n \"spec\": {\n \"pgBouncer\": {\n \"pgbouncer.ini\"\ + : {\n \"databases\": {\n \"demo\": {\n \"dbname\"\ + : \"demo\",\n \"pool_size\": 400,\n \"reserve_pool\"\ + : 5\n },\n \"postgres\": {\n \"dbname\":\ + \ \"postgres\",\n \"pool_size\": 10,\n \"reserve_pool\"\ + : 5\n }\n },\n \"pgbouncer\": {\n \"\ + default_pool_size\": \"100\",\n \"max_client_conn\": \"2000\",\n\ + \ \"pool_mode\": \"session\"\n }\n }\n }\n \ + \ }\n },\n {\n \"apiVersion\": \"stackgres.io/v1\",\n \"kind\": \"\ + SGPostgresConfig\",\n \"metadata\": {\n \"name\": \"pgconfig\",\n \ + \ \"namespace\": \"demo-db\"\n },\n \"spec\": {\n \"postgresVersion\"\ + : \"15\",\n \"postgresql.conf\": {\n \"effective_cache_size\": \"\ + 5GB\",\n \"hot_standby_feedback\": \"on\",\n \"log_min_duration_statement\"\ + : \"1000\",\n \"maintenance_work_mem\": \"2GB\",\n \"max_connections\"\ + : \"600\",\n \"shared_buffers\": \"3GB\",\n \"work_mem\": \"16MB\"\ + \n }\n }\n },\n {\n \"apiVersion\": \"stackgres.io/v1\",\n \"\ + kind\": \"SGScript\",\n \"metadata\": {\n \"name\": \"create-db-script\"\ + ,\n \"namespace\": \"demo-db\"\n },\n \"spec\": {\n \"continueOnError\"\ + : false,\n \"managedVersions\": true,\n \"scripts\": [\n {\n\ + \ \"name\": \"create-demo-database\",\n \"script\": \"CREATE\ + \ DATABASE demo WITH OWNER postgres;\\n\"\n }\n ]\n }\n },\n\ + \ {\n \"apiVersion\": \"stackgres.io/v1\",\n \"kind\": \"SGScript\",\n\ + \ \"metadata\": {\n \"name\": \"stream-db\",\n \"namespace\": \"\ + demo-db\"\n },\n \"spec\": {\n \"source\": {\n \"sgCluster\"\ + : {\n \"name\": \"demo-db\"\n },\n \"type\": \"SGCluster\"\ + \n },\n \"target\": {\n \"cloudEvent\": {\n \"http\"\ + : {\n \"url\": \"http://cloudevents-nodejs\"\n }\n \ + \ },\n \"type\": \"CloudEvent\"\n }\n }\n },\n {\n \"\ + apiVersion\": \"stackgres.io/v1\",\n \"kind\": \"SGShardedBackup\",\n \ + \ \"metadata\": {\n \"name\": \"demo-backup\",\n \"namespace\": \"\ + demo-db\"\n },\n \"spec\": {\n \"managedLifecycle\": false,\n \ + \ \"sgShardedCluster\": \"demo-shardeddb\"\n }\n },\n {\n \"apiVersion\"\ + : \"stackgres.io/v1\",\n \"kind\": \"SGShardedDbOps\",\n \"metadata\"\ + : {\n \"name\": \"demo-restart-op\",\n \"namespace\": \"demo-db\"\n\ + \ },\n \"spec\": {\n \"op\": \"restart\",\n \"sgShardedCluster\"\ + : \"demo-shardeddb\"\n }\n },\n 
{\n \"apiVersion\": \"stackgres.io/v1alpha1\"\ + ,\n \"kind\": \"SGShardedCluster\",\n \"metadata\": {\n \"name\"\ + : \"demo-shardeddb\",\n \"namespace\": \"demo-db\"\n },\n \"spec\"\ + : {\n \"coordinator\": {\n \"instances\": 2,\n \"pods\":\ + \ {\n \"persistentVolume\": {\n \"size\": \"10Gi\"\n \ + \ }\n }\n },\n \"database\": \"sharded\",\n \"postgres\"\ + : {\n \"version\": \"15.3\"\n },\n \"shards\": {\n \"\ + clusters\": 3,\n \"instancesPerCluster\": 2,\n \"pods\": {\n \ + \ \"persistentVolume\": {\n \"size\": \"10Gi\"\n \ + \ }\n }\n },\n \"type\": \"citus\"\n }\n },\n {\n \ + \ \"apiVersion\": \"stackgres.io/v1beta1\",\n \"kind\": \"SGObjectStorage\"\ + ,\n \"metadata\": {\n \"name\": \"backupconfig\",\n \"namespace\"\ + : \"demo-db\"\n },\n \"spec\": {\n \"gcs\": {\n \"bucket\"\ + : \"stackgres-backups\",\n \"gcpCredentials\": {\n \"secretKeySelectors\"\ + : {\n \"serviceAccountJSON\": {\n \"key\": \"gcloudkey\"\ + ,\n \"name\": \"backups-gcp\"\n }\n }\n \ + \ }\n },\n \"type\": \"gcs\"\n }\n }\n]" + capabilities: Deep Insights + categories: Database + containerImage: quay.io/stackgres/operator:1.15.0-rc1 + createdAt: '2024-12-04T15:46:43Z' + description: 'The most advanced Postgres Enterprise Platform. + + Fully Open Source. + + ' + operatorhub.io/ui-metadata-max-k8s-version: 1.31.999 + operators.operatorframework.io/builder: operator-sdk-v1.32.0 + operators.operatorframework.io/project_layout: quarkus.javaoperatorsdk.io/v1-alpha + repository: https://gitlab.com/ongresinc/stackgres + name: stackgres.v1.15.0-rc1 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: Handle to a performed (or to be performed, if run manually) backup + displayName: StackGres Backup + kind: SGBackup + name: sgbackups.stackgres.io + specDescriptors: + - description: "The name of the `SGCluster` from which this backup is/will\ + \ be taken.\n\nIf this is a copy of an existing completed backup in\ + \ a different namespace\n the value must be prefixed with the namespace\ + \ of the source backup and a\n dot `.` (e.g. `.`) or have the same value\n if the source backup is also a copy.\n" + displayName: Target SGCluster + path: sgCluster + - description: "Indicate if this backup is not permanent and should be removed\ + \ by the automated\n retention policy. Default is `false`.\n" + displayName: Managed Lifecycle + path: managedLifecycle + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Allow to set a timeout for the backup creation. + + + If not set it will be disabled and the backup operation will continue + until the backup completes or fail. If set to 0 is the same as not being + set. + + + Make sure to set a reasonable high value in order to allow for any unexpected + delays during backup creation (network low bandwidth, disk low throughput + and so forth). + + ' + displayName: Timeout + path: timeout + - description: "Allow to set a timeout for the reconciliation process that\ + \ take place after the backup.\n\nIf not set defaults to 300 (5 minutes).\ + \ If set to 0 it will disable timeout.\n\nFailure of reconciliation\ + \ will not make the backup fail and will be re-tried the next time a\ + \ SGBackup\n or shecduled backup Job take place.\n" + displayName: Reconciliation Timeout + path: reconciliationTimeout + - description: 'The maximum number of retries the backup operation is allowed + to do after a failure. + + + A value of `0` (zero) means no retries are made. Defaults to: `3`. 
+ + ' + displayName: Max Retries + path: maxRetries + statusDescriptors: + - description: 'The name of the backup. + + ' + displayName: Internal Name + path: internalName + - description: 'The path were the backup is stored. + + ' + displayName: Backup Path + path: backupPath + - description: 'Status of the backup. + + ' + displayName: Process Status + path: process.status + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase + - description: 'If the status is `failed` this field will contain a message + indicating the failure reason. + + ' + displayName: Process Failure + path: process.failure + - description: 'Name of the pod assigned to the backup. StackGres utilizes + internally a locking mechanism based on the pod name of the job that + creates the backup. + + ' + displayName: Process Job Pod + path: process.jobPod + - description: 'Status (may be transient) until converging to `spec.managedLifecycle`. + + ' + displayName: Process Managed Lifecycle + path: process.managedLifecycle + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Start time of backup. + + ' + displayName: Process Timing Start + path: process.timing.start + - description: 'End time of backup. + + ' + displayName: Process Timing End + path: process.timing.end + - description: 'Time at which the backup is safely stored in the object + storage. + + ' + displayName: Process Timing Stored + path: process.timing.stored + - description: 'Hostname of the instance where the backup is taken from. + + ' + displayName: Backup Information Hostname + path: backupInformation.hostname + - description: 'Pod where the backup is taken from. + + ' + displayName: Backup Information Source Pod + path: backupInformation.sourcePod + - description: 'Postgres *system identifier* of the cluster this backup + is taken from. + + ' + displayName: Backup Information System Identifier + path: backupInformation.systemIdentifier + - description: 'Postgres version of the server where the backup is taken + from. + + ' + displayName: Backup Information Postgres Version + path: backupInformation.postgresVersion + - description: 'Data directory where the backup is taken from. + + ' + displayName: Backup Information Pg Data + path: backupInformation.pgData + - description: 'Size (in bytes) of the uncompressed backup. + + ' + displayName: Backup Information Size Uncompressed + path: backupInformation.size.uncompressed + - description: 'Size (in bytes) of the compressed backup. + + ' + displayName: Backup Information Size Compressed + path: backupInformation.size.compressed + - description: 'LSN of when the backup started. + + ' + displayName: Backup Information Lsn Start + path: backupInformation.lsn.start + - description: 'LSN of when the backup finished. + + ' + displayName: Backup Information Lsn End + path: backupInformation.lsn.end + - description: 'WAL segment file name when the backup was started. + + ' + displayName: Backup Information Start Wal File + path: backupInformation.startWalFile + - description: 'Backup timeline. 
+ + ' + displayName: Backup Information Timeline + path: backupInformation.timeline + - displayName: Backup Information Control Data Pg_control Version Number + path: backupInformation.controlData.pg_control version number + - displayName: Backup Information Control Data Catalog Version Number + path: backupInformation.controlData.Catalog version number + - displayName: Backup Information Control Data Database System Identifier + path: backupInformation.controlData.Database system identifier + - displayName: Backup Information Control Data Database Cluster State + path: backupInformation.controlData.Database cluster state + - displayName: Backup Information Control Data Pg_control Last Modified + path: backupInformation.controlData.pg_control last modified + - displayName: Backup Information Control Data Latest Checkpoint Location + path: backupInformation.controlData.Latest checkpoint location + - displayName: Backup Information Control Data Latest Checkpoint's REDO + Location + path: backupInformation.controlData.Latest checkpoint's REDO location + - displayName: Backup Information Control Data Latest Checkpoint's REDOWAL + File + path: backupInformation.controlData.Latest checkpoint's REDO WAL file + - displayName: Backup Information Control Data Latest Checkpoint's Time + Line ID + path: backupInformation.controlData.Latest checkpoint's TimeLineID + - displayName: Backup Information Control Data Latest Checkpoint's Prev + Time Line ID + path: backupInformation.controlData.Latest checkpoint's PrevTimeLineID + - displayName: Backup Information Control Data Latest Checkpoint's Full_page_writes + path: backupInformation.controlData.Latest checkpoint's full_page_writes + - displayName: Backup Information Control Data Latest Checkpoint's Next + XID + path: backupInformation.controlData.Latest checkpoint's NextXID + - displayName: Backup Information Control Data Latest Checkpoint's Next + OID + path: backupInformation.controlData.Latest checkpoint's NextOID + - displayName: Backup Information Control Data Latest Checkpoint's Next + Multi Xact Id + path: backupInformation.controlData.Latest checkpoint's NextMultiXactId + - displayName: Backup Information Control Data Latest Checkpoint's Next + Multi Offset + path: backupInformation.controlData.Latest checkpoint's NextMultiOffset + - displayName: Backup Information Control Data Latest Checkpoint's Oldest + XID + path: backupInformation.controlData.Latest checkpoint's oldestXID + - displayName: Backup Information Control Data Latest Checkpoint's Oldest + XI D's DB + path: backupInformation.controlData.Latest checkpoint's oldestXID's DB + - displayName: Backup Information Control Data Latest Checkpoint's Oldest + Active XID + path: backupInformation.controlData.Latest checkpoint's oldestActiveXID + - displayName: Backup Information Control Data Latest Checkpoint's Oldest + Multi Xid + path: backupInformation.controlData.Latest checkpoint's oldestMultiXid + - displayName: Backup Information Control Data Latest Checkpoint's Oldest + Multi's DB + path: backupInformation.controlData.Latest checkpoint's oldestMulti's + DB + - displayName: Backup Information Control Data Latest Checkpoint's Oldest + Commit Ts Xid + path: backupInformation.controlData.Latest checkpoint's oldestCommitTsXid + - displayName: Backup Information Control Data Latest Checkpoint's Newest + Commit Ts Xid + path: backupInformation.controlData.Latest checkpoint's newestCommitTsXid + - displayName: Backup Information Control Data Time Of Latest Checkpoint + path: 
backupInformation.controlData.Time of latest checkpoint + - displayName: Backup Information Control Data Fake LSN Counter For Unlogged + Rels + path: backupInformation.controlData.Fake LSN counter for unlogged rels + - displayName: Backup Information Control Data Minimum Recovery Ending Location + path: backupInformation.controlData.Minimum recovery ending location + - displayName: Backup Information Control Data Min Recovery Ending Loc's + Timeline + path: backupInformation.controlData.Min recovery ending loc's timeline + - displayName: Backup Information Control Data Backup Start Location + path: backupInformation.controlData.Backup start location + - displayName: Backup Information Control Data Backup End Location + path: backupInformation.controlData.Backup end location + - displayName: Backup Information Control Data End-of-backup Record Required + path: backupInformation.controlData.End-of-backup record required + - displayName: Backup Information Control Data Wal_level Setting + path: backupInformation.controlData.wal_level setting + - displayName: Backup Information Control Data Wal_log_hints Setting + path: backupInformation.controlData.wal_log_hints setting + - displayName: Backup Information Control Data Max_connections Setting + path: backupInformation.controlData.max_connections setting + - displayName: Backup Information Control Data Max_worker_processes Setting + path: backupInformation.controlData.max_worker_processes setting + - displayName: Backup Information Control Data Max_wal_senders Setting + path: backupInformation.controlData.max_wal_senders setting + - displayName: Backup Information Control Data Max_prepared_xacts Setting + path: backupInformation.controlData.max_prepared_xacts setting + - displayName: Backup Information Control Data Max_locks_per_xact Setting + path: backupInformation.controlData.max_locks_per_xact setting + - displayName: Backup Information Control Data Track_commit_timestamp Setting + path: backupInformation.controlData.track_commit_timestamp setting + - displayName: Backup Information Control Data Maximum Data Alignment + path: backupInformation.controlData.Maximum data alignment + - displayName: Backup Information Control Data Database Block Size + path: backupInformation.controlData.Database block size + - displayName: Backup Information Control Data Blocks Per Segment Of Large + Relation + path: backupInformation.controlData.Blocks per segment of large relation + - displayName: Backup Information Control Data WAL Block Size + path: backupInformation.controlData.WAL block size + - displayName: Backup Information Control Data Bytes Per WAL Segment + path: backupInformation.controlData.Bytes per WAL segment + - displayName: Backup Information Control Data Maximum Length Of Identifiers + path: backupInformation.controlData.Maximum length of identifiers + - displayName: Backup Information Control Data Maximum Columns In An Index + path: backupInformation.controlData.Maximum columns in an index + - displayName: Backup Information Control Data Maximum Size Of ATOAST Chunk + path: backupInformation.controlData.Maximum size of a TOAST chunk + - displayName: Backup Information Control Data Size Of A Large-object Chunk + path: backupInformation.controlData.Size of a large-object chunk + - displayName: Backup Information Control Data Date/time Type Storage + path: backupInformation.controlData.Date/time type storage + - displayName: Backup Information Control Data Float4 Argument Passing + path: backupInformation.controlData.Float4 argument passing + 
- displayName: Backup Information Control Data Float8 Argument Passing + path: backupInformation.controlData.Float8 argument passing + - displayName: Backup Information Control Data Data Page Checksum Version + path: backupInformation.controlData.Data page checksum version + - displayName: Backup Information Control Data Mock Authentication Nonce + path: backupInformation.controlData.Mock authentication nonce + - description: 'Continuous Archiving backups are composed of periodic *base + backups* and all the WAL segments produced in between those base backups. + This parameter specifies at what time and with what frequency to start + performing a new base backup. + + + Use cron syntax (`m h dom mon dow`) for this parameter, i.e., 5 values + separated by spaces: + + * `m`: minute, 0 to 59 + + * `h`: hour, 0 to 23 + + * `dom`: day of month, 1 to 31 (recommended not to set it higher than + 28) + + * `mon`: month, 1 to 12 + + * `dow`: day of week, 0 to 7 (0 and 7 both represent Sunday) + + + Also ranges of values (`start-end`), the symbol `*` (meaning `first-last`) + or even `*/N`, where `N` is a number, meaning every `N`, may be used. + All times are UTC. It is recommended to avoid 00:00 as base backup time, + to avoid overlapping with any other external operations happening at + this time. + + ' + displayName: SGBackup Config Base Backups Cron Schedule + path: sgBackupConfig.baseBackups.cronSchedule + - description: 'Based on this parameter, an automatic retention policy is + defined to delete old base backups. + + This parameter specifies the number of base backups to keep, in a sliding + window. + + Consequently, the time range covered by backups is `periodicity*retention`, + where `periodicity` is the separation between backups as specified by + the `cronSchedule` property. + + + Default is 5. + + ' + displayName: SGBackup Config Base Backups Retention + path: sgBackupConfig.baseBackups.retention + - description: 'Select the backup compression algorithm. Possible options + are: lz4, lzma, brotli. The default method is `lz4`. LZ4 is the fastest + method, but compression ratio is the worst. LZMA is way slower, but + it compresses backups about 6 times better than LZ4. Brotli is a good + trade-off between speed and compression ratio, being about 3 times better + than LZ4. + + ' + displayName: SGBackup Config Base Backups Compression + path: sgBackupConfig.baseBackups.compression + - description: '**Deprecated**: use instead maxNetworkBandwidth. + + + Maximum storage upload bandwidth to be used when storing the backup. + In bytes (per second). + + ' + displayName: SGBackup Config Base Backups Performance Max Network Bandwitdh + path: sgBackupConfig.baseBackups.performance.maxNetworkBandwitdh + - description: '**Deprecated**: use instead maxDiskBandwidth. + + + Maximum disk read I/O when performing a backup. In bytes (per second). + + ' + displayName: SGBackup Config Base Backups Performance Max Disk Bandwitdh + path: sgBackupConfig.baseBackups.performance.maxDiskBandwitdh + - description: 'Maximum storage upload bandwidth to be used when storing + the backup. In bytes (per second). + + ' + displayName: SGBackup Config Base Backups Performance Max Network Bandwidth + path: sgBackupConfig.baseBackups.performance.maxNetworkBandwidth + - description: 'Maximum disk read I/O when performing a backup. In bytes + (per second). 
+ + ' + displayName: SGBackup Config Base Backups Performance Max Disk Bandwidth + path: sgBackupConfig.baseBackups.performance.maxDiskBandwidth + - description: 'Backup storage may use several concurrent streams to store + the data. This parameter configures the number of parallel streams to + use to reading from disk. By default, it''s set to 1 (use one stream). + + ' + displayName: SGBackup Config Base Backups Performance Upload Disk Concurrency + path: sgBackupConfig.baseBackups.performance.uploadDiskConcurrency + - description: 'Backup storage may use several concurrent streams to store + the data. This parameter configures the number of parallel streams to + use. By default, it''s set to 1 (use one stream). + + ' + displayName: SGBackup Config Base Backups Performance Upload Concurrency + path: sgBackupConfig.baseBackups.performance.uploadConcurrency + - description: 'Select the backup compression algorithm. Possible options + are: lz4, lzma, brotli. The default method is `lz4`. LZ4 is the fastest + method, but compression ratio is the worst. LZMA is way slower, but + it compresses backups about 6 times better than LZ4. Brotli is a good + trade-off between speed and compression ratio, being about 3 times better + than LZ4. + + ' + displayName: SGBackup Config Compression + path: sgBackupConfig.compression + - description: 'Specifies the type of object storage used for storing the + base backups and WAL segments. + + Possible values: + + * `s3`: Amazon Web Services S3 (Simple Storage Service). + + * `s3Compatible`: non-AWS services that implement a compatibility API + with AWS S3. + + * `gcs`: Google Cloud Storage. + + * `azureBlob`: Microsoft Azure Blob Storage. + + ' + displayName: SGBackup Config Storage Type + path: sgBackupConfig.storage.type + - description: 'AWS S3 bucket name. + + ' + displayName: SGBackup Config Storage S3 Bucket + path: sgBackupConfig.storage.s3.bucket + - description: 'Optional path within the S3 bucket. Note that StackGres + generates in any case a folder per + + StackGres cluster, using the `SGCluster.metadata.name`. + + ' + displayName: SGBackup Config Storage S3 Path + path: sgBackupConfig.storage.s3.path + - description: 'AWS S3 region. The Region may be detected using s3:GetBucketLocation, + but to avoid giving permissions to this API call or forbid it from the + applicable IAM policy, this property must be explicitely specified. + + ' + displayName: SGBackup Config Storage S3 Region + path: sgBackupConfig.storage.s3.region + - description: '[Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) + used for the backup object storage. By default, the `STANDARD` storage + class is used. Other supported values include `STANDARD_IA` for Infrequent + Access and `REDUCED_REDUNDANCY`. + + ' + displayName: SGBackup Config Storage S3 Storage Class + path: sgBackupConfig.storage.s3.storageClass + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: SGBackup Config Storage S3 Aws Credentials Secret Key Selectors + Access Key Id Key + path: sgBackupConfig.storage.s3.awsCredentials.secretKeySelectors.accessKeyId.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: SGBackup Config Storage S3 Aws Credentials Secret Key Selectors + Access Key Id Name + path: sgBackupConfig.storage.s3.awsCredentials.secretKeySelectors.accessKeyId.name + - description: 'The key of the secret to select from. 
Must be a valid secret + key. + + ' + displayName: SGBackup Config Storage S3 Aws Credentials Secret Key Selectors + Secret Access Key Key + path: sgBackupConfig.storage.s3.awsCredentials.secretKeySelectors.secretAccessKey.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: SGBackup Config Storage S3 Aws Credentials Secret Key Selectors + Secret Access Key Name + path: sgBackupConfig.storage.s3.awsCredentials.secretKeySelectors.secretAccessKey.name + - description: 'Bucket name. + + ' + displayName: SGBackup Config Storage S3 Compatible Bucket + path: sgBackupConfig.storage.s3Compatible.bucket + - description: 'Optional path within the S3 bucket. Note that StackGres + generates in any case a folder per StackGres cluster, using the `SGCluster.metadata.name`. + + ' + displayName: SGBackup Config Storage S3 Compatible Path + path: sgBackupConfig.storage.s3Compatible.path + - description: 'Enable path-style addressing (i.e. `http://s3.amazonaws.com/BUCKET/KEY`) + when connecting to an S3-compatible service that lacks support for sub-domain + style bucket URLs (i.e. `http://BUCKET.s3.amazonaws.com/KEY`). Defaults + to false. + + ' + displayName: SGBackup Config Storage S3 Compatible Enable Path Style Addressing + path: sgBackupConfig.storage.s3Compatible.enablePathStyleAddressing + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Overrides the default url to connect to an S3-compatible + service. + + For example: `http://s3-like-service:9000`. + + ' + displayName: SGBackup Config Storage S3 Compatible Endpoint + path: sgBackupConfig.storage.s3Compatible.endpoint + - description: 'AWS S3 region. The Region may be detected using s3:GetBucketLocation, + but to avoid giving permissions to this API call or forbid it from the + applicable IAM policy, this property must be explicitely specified. + + ' + displayName: SGBackup Config Storage S3 Compatible Region + path: sgBackupConfig.storage.s3Compatible.region + - description: '[Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) + used for the backup object storage. By default, the `STANDARD` storage + class is used. Other supported values include `STANDARD_IA` for Infrequent + Access and `REDUCED_REDUNDANCY`. + + ' + displayName: SGBackup Config Storage S3 Compatible Storage Class + path: sgBackupConfig.storage.s3Compatible.storageClass + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: SGBackup Config Storage S3 Compatible Aws Credentials Secret + Key Selectors Access Key Id Key + path: sgBackupConfig.storage.s3Compatible.awsCredentials.secretKeySelectors.accessKeyId.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: SGBackup Config Storage S3 Compatible Aws Credentials Secret + Key Selectors Access Key Id Name + path: sgBackupConfig.storage.s3Compatible.awsCredentials.secretKeySelectors.accessKeyId.name + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: SGBackup Config Storage S3 Compatible Aws Credentials Secret + Key Selectors Secret Access Key Key + path: sgBackupConfig.storage.s3Compatible.awsCredentials.secretKeySelectors.secretAccessKey.key + - description: 'Name of the referent. 
[More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: SGBackup Config Storage S3 Compatible Aws Credentials Secret + Key Selectors Secret Access Key Name + path: sgBackupConfig.storage.s3Compatible.awsCredentials.secretKeySelectors.secretAccessKey.name + - description: 'GCS bucket name. + + ' + displayName: SGBackup Config Storage Gcs Bucket + path: sgBackupConfig.storage.gcs.bucket + - description: 'Optional path within the GCS bucket. Note that StackGres + generates in any case a folder per StackGres cluster, using the `SGCluster.metadata.name`. + + ' + displayName: SGBackup Config Storage Gcs Path + path: sgBackupConfig.storage.gcs.path + - description: 'If true, the credentials will be fetched from the GCE/GKE + metadata service and the credentials from `secretKeySelectors` field + will not be used. + + + This is useful when running StackGres inside a GKE cluster using [Workload + Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). + + ' + displayName: SGBackup Config Storage Gcs Gcp Credentials Fetch Credentials + From Metadata Service + path: sgBackupConfig.storage.gcs.gcpCredentials.fetchCredentialsFromMetadataService + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: SGBackup Config Storage Gcs Gcp Credentials Secret Key Selectors + Service Account JSON Key + path: sgBackupConfig.storage.gcs.gcpCredentials.secretKeySelectors.serviceAccountJSON.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: SGBackup Config Storage Gcs Gcp Credentials Secret Key Selectors + Service Account JSON Name + path: sgBackupConfig.storage.gcs.gcpCredentials.secretKeySelectors.serviceAccountJSON.name + - description: 'Azure Blob Storage bucket name. + + ' + displayName: SGBackup Config Storage Azure Blob Bucket + path: sgBackupConfig.storage.azureBlob.bucket + - description: 'Optional path within the Azure Blobk bucket. Note that StackGres + generates in any case a folder per StackGres cluster, using the `SGCluster.metadata.name`. + + ' + displayName: SGBackup Config Storage Azure Blob Path + path: sgBackupConfig.storage.azureBlob.path + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: SGBackup Config Storage Azure Blob Azure Credentials Secret + Key Selectors Storage Account Key + path: sgBackupConfig.storage.azureBlob.azureCredentials.secretKeySelectors.storageAccount.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: SGBackup Config Storage Azure Blob Azure Credentials Secret + Key Selectors Storage Account Name + path: sgBackupConfig.storage.azureBlob.azureCredentials.secretKeySelectors.storageAccount.name + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: SGBackup Config Storage Azure Blob Azure Credentials Secret + Key Selectors Access Key Key + path: sgBackupConfig.storage.azureBlob.azureCredentials.secretKeySelectors.accessKey.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). 
+ + ' + displayName: SGBackup Config Storage Azure Blob Azure Credentials Secret + Key Selectors Access Key Name + path: sgBackupConfig.storage.azureBlob.azureCredentials.secretKeySelectors.accessKey.name + - description: 'The volume snapshot used to store this backup. + + ' + displayName: Volume Snapshot Name + path: volumeSnapshot.name + - description: 'The content of `backup_label` column returned by `pg_backup_stop` + encoded in Base64 + + ' + displayName: Volume Snapshot Backup Label + path: volumeSnapshot.backupLabel + - description: 'The content of `tablespace_map` column returned by `pg_backup_stop` + encoded in Base64 + + ' + displayName: Volume Snapshot Tablespace Map + path: volumeSnapshot.tablespaceMap + version: v1 + - description: Main CRD, manages Postgres clusters (one or more Postgres pods) + displayName: StackGres Cluster + kind: SGCluster + name: sgclusters.stackgres.io + specDescriptors: + - description: "The profile allow to change in a convenient place a set\ + \ of configuration defaults that affect how the cluster is generated.\n\ + \nAll those defaults can be overwritten by setting the correspoinding\ + \ fields.\n\nAvailable profiles are:\n\n* `production`:\n\n Prevents\ + \ two Pods from running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `false` by default).\n Sets both limits and requests using `SGInstanceProfile`\ + \ for `patroni` container that runs both Patroni and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `false` by default).\n Sets requests using the referenced `SGInstanceProfile`\ + \ for sidecar containers other than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `false` by default).\n\n* `testing`:\n\n Allows two Pods to running\ + \ in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `true` by default).\n Sets both limits and requests using `SGInstanceProfile`\ + \ for `patroni` container that runs both Patroni and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `false` by default).\n Sets requests using the referenced `SGInstanceProfile`\ + \ for sidecar containers other than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `false` by default).\n\n* `development`:\n\n Allows two Pods from\ + \ running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `true` by default).\n Unset both limits and requests for `patroni`\ + \ container that runs both Patroni and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `true` by default).\n Unsets requests for sidecar containers other\ + \ than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `true` by default).\n\n**Changing this field may require a restart.**\n" + displayName: Profile + path: profile + - description: 'Postgres version used on the cluster. It is either of: + + * The string ''latest'', which automatically sets the latest major.minor + Postgres version. + + * A major version, like ''14'' or ''13'', which sets that major version + and the latest minor version. + + * A specific major.minor version, like ''14.4''. + + ' + displayName: Postgres Version + path: postgres.version + - description: "Postgres flavor used on the cluster. 
It is either of:\n\n\ + \ * `vanilla` will use the [Official Postgres](https://www.postgresql.org/)\n\ + \ * `babelfish` will use the [Babelfish for Postgres](https://babelfish-for-postgresql.github.io/babelfish-for-postgresql/).\n\ + \nIf not specified then the vanilla Postgres will be used for the cluster.\n\ + \n**This field can only be set on creation.**\n" + displayName: Postgres Flavor + path: postgres.flavor + - description: The name of the extension to deploy. + displayName: Postgres Extensions Name + path: postgres.extensions.name + - description: The id of the publisher of the extension to deploy. If not + specified `com.ongres` will be used by default. + displayName: Postgres Extensions Publisher + path: postgres.extensions.publisher + - description: The version of the extension to deploy. If not specified + version of `stable` channel will be used by default and if only a version + is available that one will be used. + displayName: Postgres Extensions Version + path: postgres.extensions.version + - description: 'The repository base URL from where to obtain the extension + to deploy. + + + **This section is filled by the operator.** + + ' + displayName: Postgres Extensions Repository + path: postgres.extensions.repository + - description: 'Allow to enable SSL for connections to Postgres. By default + is `false`. + + + If `true` certificate and private key will be auto-generated unless + fields `certificateSecretKeySelector` and `privateKeySecretKeySelector` + are specified. + + ' + displayName: Postgres Ssl Enabled + path: postgres.ssl.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Secret key selector for the certificate or certificate chain + used for SSL connections. + + ' + displayName: Postgres Ssl Certificate Secret Key Selector + path: postgres.ssl.certificateSecretKeySelector + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The name of Secret that contains the certificate or certificate + chain for SSL connections + + ' + displayName: Postgres Ssl Certificate Secret Key Selector Name + path: postgres.ssl.certificateSecretKeySelector.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'The key of Secret that contains the certificate or certificate + chain for SSL connections + + ' + displayName: Postgres Ssl Certificate Secret Key Selector Key + path: postgres.ssl.certificateSecretKeySelector.key + - description: 'Secret key selector for the private key used for SSL connections. + + ' + displayName: Postgres Ssl Private Key Secret Key Selector + path: postgres.ssl.privateKeySecretKeySelector + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The name of Secret that contains the private key for SSL + connections + + ' + displayName: Postgres Ssl Private Key Secret Key Selector Name + path: postgres.ssl.privateKeySecretKeySelector.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'The key of Secret that contains the private key for SSL + connections + + ' + displayName: Postgres Ssl Private Key Secret Key Selector Key + path: postgres.ssl.privateKeySecretKeySelector.key + - description: "Number of instances for the StackGres cluster. 
Each instance\ + \ is a Pod containing one Postgres server.\n Out of all of the Postgres\ + \ servers, one is elected as the primary, the rest remain as read-only\ + \ replicas.\n" + displayName: Instances + path: instances + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: 'Allow to enable or disable any of horizontal and vertical + Pod autoscaling. + + + Possible values are: + + * `all`: both horizontal and vertical Pod autoscaling will be enabled + (default) + + * `horizontal`: only horizontal Pod autoscaling will be enabled + + * `vertical`: only vertical Pod autoscaling will be enabled + + * `none`: all autoscaling will be disabled + + ' + displayName: Autoscaling Mode + path: autoscaling.mode + - description: 'The total minimum number of instances that the SGCluster + will have (including the primary instance). + + + This field is ignored when horizontal Pod autoscaling is disabled. + + ' + displayName: Autoscaling Min Instances + path: autoscaling.minInstances + - description: 'The total maximum number of instances that the SGCluster + will have (including the primary instance). + + + This field is ignored when horizontal Pod autoscaling is disabled. + + ' + displayName: Autoscaling Max Instances + path: autoscaling.maxInstances + - description: The minimum allowed CPU for the patroni container + displayName: Autoscaling Min Allowed Patroni Cpu + path: autoscaling.minAllowed.patroni.cpu + - description: The minimum allowed memory for the patroni container + displayName: Autoscaling Min Allowed Patroni Memory + path: autoscaling.minAllowed.patroni.memory + - description: The minimum allowed CPU for the pgbouncer container + displayName: Autoscaling Min Allowed Pgbouncer Cpu + path: autoscaling.minAllowed.pgbouncer.cpu + - description: The minimum allowed memory for the pgbouncer container + displayName: Autoscaling Min Allowed Pgbouncer Memory + path: autoscaling.minAllowed.pgbouncer.memory + - description: The minimum allowed CPU for the envoy container + displayName: Autoscaling Min Allowed Envoy Cpu + path: autoscaling.minAllowed.envoy.cpu + - description: The minimum allowed memory for the envoy container + displayName: Autoscaling Min Allowed Envoy Memory + path: autoscaling.minAllowed.envoy.memory + - description: The maximum allowed CPU for the patroni container + displayName: Autoscaling Max Allowed Patroni Cpu + path: autoscaling.maxAllowed.patroni.cpu + - description: The maximum allowed memory for the patroni container + displayName: Autoscaling Max Allowed Patroni Memory + path: autoscaling.maxAllowed.patroni.memory + - description: The maximum allowed CPU for the pgbouncer container + displayName: Autoscaling Max Allowed Pgbouncer Cpu + path: autoscaling.maxAllowed.pgbouncer.cpu + - description: The maximum allowed memory for the pgbouncer container + displayName: Autoscaling Max Allowed Pgbouncer Memory + path: autoscaling.maxAllowed.pgbouncer.memory + - description: The maximum allowed CPU for the envoy container + displayName: Autoscaling Max Allowed Envoy Cpu + path: autoscaling.maxAllowed.envoy.cpu + - description: The maximum allowed memory for the envoy container + displayName: Autoscaling Max Allowed Envoy Memory + path: autoscaling.maxAllowed.envoy.memory + - description: 'The target value for replicas connections used in order + to trigger the upscale of replica instances. 
+ + ' + displayName: Autoscaling Horizontal Replicas Connections Usage Target + path: autoscaling.horizontal.replicasConnectionsUsageTarget + - description: 'The metric type for connections used metric. See https://keda.sh/docs/latest/concepts/scaling-deployments/#triggers + + ' + displayName: Autoscaling Horizontal Replicas Connections Usage Metric + Type + path: autoscaling.horizontal.replicasConnectionsUsageMetricType + - description: 'The period in seconds before the downscale of replica instances + can be triggered. + + ' + displayName: Autoscaling Horizontal Cooldown Period + path: autoscaling.horizontal.cooldownPeriod + - description: 'The interval in seconds to check if the scaleup or scaledown + have to be triggered. + + ' + displayName: Autoscaling Horizontal Polling Interval + path: autoscaling.horizontal.pollingInterval + - description: 'Recommender responsible for generating recommendation for + vertical Pod autoscaling. If not specified the default one will be used. + + ' + displayName: Autoscaling Vertical Recommender + path: autoscaling.vertical.recommender + - description: "The replication mode applied to the whole cluster.\nPossible\ + \ values are:\n* `async` (default)\n* `sync`\n* `strict-sync`\n* `sync-all`\n\ + * `strict-sync-all`\n\n**async**\n\nWhen in asynchronous mode the cluster\ + \ is allowed to lose some committed transactions.\n When the primary\ + \ server fails or becomes unavailable for any other reason a sufficiently\ + \ healthy standby\n will automatically be promoted to primary. Any\ + \ transactions that have not been replicated to that standby\n remain\ + \ in a \"forked timeline\" on the primary, and are effectively unrecoverable\ + \ (the data is still there,\n but recovering it requires a manual recovery\ + \ effort by data recovery specialists).\n\n**sync**\n\nWhen in synchronous\ + \ mode a standby will not be promoted unless it is certain that the\ + \ standby contains all\n transactions that may have returned a successful\ + \ commit status to client (clients can change the behavior\n per transaction\ + \ using PostgreSQL’s `synchronous_commit` setting. Transactions with\ + \ `synchronous_commit`\n values of `off` and `local` may be lost on\ + \ fail over, but will not be blocked by replication delays). This\n\ + \ means that the system may be unavailable for writes even though some\ + \ servers are available. System\n administrators can still use manual\ + \ failover commands to promote a standby even if it results in transaction\n\ + \ loss.\n\nSynchronous mode does not guarantee multi node durability\ + \ of commits under all circumstances. When no suitable\n standby is\ + \ available, primary server will still accept writes, but does not guarantee\ + \ their replication. When\n the primary fails in this mode no standby\ + \ will be promoted. When the host that used to be the primary comes\n\ + \ back it will get promoted automatically, unless system administrator\ + \ performed a manual failover. This behavior\n makes synchronous mode\ + \ usable with 2 node clusters.\n\nWhen synchronous mode is used and\ + \ a standby crashes, commits will block until the primary is switched\ + \ to standalone\n mode. Manually shutting down or restarting a standby\ + \ will not cause a commit service interruption. 
Standby will\n signal\
+ \ the primary to release itself from synchronous standby duties before\
+ \ PostgreSQL shutdown is initiated.\n\n**strict-sync**\n\nWhen it is\
+ \ absolutely necessary to guarantee that each write is stored durably\
+ \ on at least two nodes, use the strict\n synchronous mode. This mode\
+ \ prevents synchronous replication from being switched off on the primary\
+ \ when no synchronous\n standby candidates are available. As a downside,\
+ \ the primary will not be available for writes (unless the Postgres\n\
+ \ transaction explicitly turns off the `synchronous_mode` parameter), blocking\
+ \ all client write requests until at least one\n synchronous replica\
+ \ comes up.\n\n**Note**: Because of the way synchronous replication\
+ \ is implemented in PostgreSQL it is still possible to lose\n transactions\
+ \ even when using strict synchronous mode. If the PostgreSQL backend\
+ \ is cancelled while waiting to acknowledge\n replication (as a result\
+ \ of packet cancellation due to client timeout or backend failure) transaction\
+ \ changes become\n visible for other backends. Such changes are not\
+ \ yet replicated and may be lost in case of standby promotion.\n\n**sync-all**\n\
+ \nThe same as `sync` but `syncInstances` is ignored and the number of\
+ \ synchronous instances is equal to the total number\n of instances\
+ \ less one.\n\n**strict-sync-all**\n\nThe same as `strict-sync` but\
+ \ `syncInstances` is ignored and the number of synchronous instances\
+ \ is equal to the total number\n of instances less one.\n"
+ displayName: Replication Mode
+ path: replication.mode
+ - description: 'This role is applied to the instances of the implicit replication
+ group that is composed of `.spec.instances` number of instances.
+
+ Possible values are:
+
+ * `ha-read` (default)
+
+ * `ha`
+
+ The primary instance will be elected among all the replication groups
+ that are either `ha` or `ha-read`.
+
+ Only if the role is set to `ha-read` instances of the main replication group
+ will be exposed via the replicas service.
+
+ '
+ displayName: Replication Role
+ path: replication.role
+ - description: "Number of synchronous standby instances. Must be less than\
+ \ the total number of instances. It is set to 1 by default.\n Only\
+ \ settable if mode is `sync` or `strict-sync`.\n"
+ displayName: Replication Sync Instances
+ path: replication.syncInstances
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podCount
+ - description: The name of the replication group. If not set will default
+ to the `group-`.
+ displayName: Replication Groups Name
+ path: replication.groups.name
+ - description: 'This role is applied to the instances of this replication
+ group.
+
+ Possible values are:
+
+ * `ha-read`
+
+ * `ha`
+
+ * `readonly`
+
+ * `none`
+
+ The primary instance will be elected among all the replication groups
+ that are either `ha` or `ha-read`.
+
+ Only if the role is set to `readonly` or `ha-read` instances of such
+ replication group will be exposed via the replicas service.
+
+ '
+ displayName: Replication Groups Role
+ path: replication.groups.role
+ - description: "Number of StackGres instances for this replication group.\n\
+ \nThe total number of instances of a cluster is always `.spec.instances`.\
+ \ The sum of the instances in all the replication groups must be\n \
+ \ less than the total number of instances.\n"
+ displayName: Replication Groups Instances
+ path: replication.groups.instances
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podCount
+ - description: "Minimum number of StackGres instances for this replication\
+ \ group. It is ignored when horizontal Pod autoscaling is disabled (see\
+ \ `.spec.autoscaling`).\n\nThe total minimum number of instances of a\
+ \ cluster is always `.spec.autoscaling.minInstances`. The sum of the\
+ \ minimum instances in all the replication groups must be\n less than\
+ \ the total minimum number of instances.\n\nWhen this field is set the\
+ \ instances value that is provided by the user is overwritten using\
+ \ the following formula to calculate it:\n\n```\n = max(,\
+ \ * / )\n\
+ ```\n"
+ displayName: Replication Groups Min Instances
+ path: replication.groups.minInstances
+ - description: "Allow to specify how the replicas are initialized.\n\nPossible\
+ \ values are:\n\n* `FromPrimary`: When this mode is used replicas will\
+ \ always be created from the primary using `pg_basebackup`.\n* `FromReplica`:\
+ \ When this mode is used replicas will be created from another existing\
+ \ replica using\n `pg_basebackup`. Falls back to `FromPrimary` if there's\
+ \ no replica or it fails.\n* `FromExistingBackup`: When this mode is\
+ \ used replicas will be created from an existing SGBackup. If `backupNewerThan`\
+ \ is set\n the SGBackup must be newer than its value. When this mode\
+ \ fails to restore an SGBackup it will try with a previous one (if exists).\n\
+ \ Falls back to `FromReplica` if there's no backup left or it fails.\n\
+ * `FromNewlyCreatedBackup`: When this mode is used replicas will be\
+ \ created from a newly created SGBackup.\n Falls back to `FromExistingBackup`\
+ \ if `backupNewerThan` is set and a recent backup newer than\
+ \ its value exists or it fails.\n"
+ displayName: Replication Initialization Mode
+ path: replication.initialization.mode
+ - description: "An ISO 8601 duration in the format `PnDTnHnMn.nS`, that\
+ \ specifies how old an SGBackup has to be in order to be selected\n\
+ \ to initialize a replica.\n\nWhen `FromExistingBackup` mode is set\
+ \ this field restricts the selection of the SGBackup to be used for recovery\
+ \ to one newer than the\n specified value. \n\nWhen `FromNewlyCreatedBackup`\
+ \ mode is set this field skips the creation of the SGBackup to be used for recovery\
+ \ if one newer than\n the specified value exists. \n"
+ displayName: Replication Initialization Backup Newer Than
+ path: replication.initialization.backupNewerThan
+ - description: 'Maximum storage upload bandwidth used when storing a backup.
+ In bytes (per second).
+
+ '
+ displayName: Replication Initialization Backup Restore Performance Max
+ Network Bandwidth
+ path: replication.initialization.backupRestorePerformance.maxNetworkBandwidth
+ - description: 'Maximum disk read I/O when performing a backup. In bytes
+ (per second).
+
+ '
+ displayName: Replication Initialization Backup Restore Performance Max
+ Disk Bandwidth
+ path: replication.initialization.backupRestorePerformance.maxDiskBandwidth
+ - description: 'Backup storage may use several concurrent streams to read
+ the data.
This parameter configures the number of parallel streams to + use. By default, it''s set to the minimum between the number of file + to read and 10. + + ' + displayName: Replication Initialization Backup Restore Performance Download + Concurrency + path: replication.initialization.backupRestorePerformance.downloadConcurrency + - description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/reference/crd/sginstanceprofile/). + + + A SGInstanceProfile defines CPU and memory limits. Must exist before + creating a cluster. + + + When no profile is set, a default (1 core, 2 GiB RAM) one is used. + + + **Changing this field may require a restart.** + + ' + displayName: SGInstanceProfile + path: sgInstanceProfile + - displayName: Metadata Annotations All Resources + path: metadata.annotations.allResources + - displayName: Metadata Annotations Cluster Pods + path: metadata.annotations.clusterPods + - displayName: Metadata Annotations Services + path: metadata.annotations.services + - displayName: Metadata Annotations Primary Service + path: metadata.annotations.primaryService + - displayName: Metadata Annotations Replicas Service + path: metadata.annotations.replicasService + - displayName: Metadata Labels Cluster Pods + path: metadata.labels.clusterPods + - displayName: Metadata Labels Services + path: metadata.labels.services + - description: Specify if the service should be created or not. + displayName: Postgres Services Primary Enabled + path: postgresServices.primary.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'type determines how the Service is exposed. Defaults to + ClusterIP. Valid + + options are ClusterIP, NodePort, and LoadBalancer. "ClusterIP" allocates + + a cluster-internal IP address for load-balancing to endpoints. + + "NodePort" builds on ClusterIP and allocates a port on every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current cloud). + + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + displayName: Postgres Services Primary Type + path: postgresServices.primary.type + - description: allocateLoadBalancerNodePorts defines if NodePorts will be + automatically allocated for services with type LoadBalancer. Default + is "true". It may be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific NodePorts (by + specifying a value), those requests will be respected, regardless of + this field. This field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any other type. + displayName: Postgres Services Primary Allocate Load Balancer Node Ports + path: postgresServices.primary.allocateLoadBalancerNodePorts + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Postgres Services Primary External I Ps + path: postgresServices.primary.externalIPs + - description: externalTrafficPolicy describes how nodes distribute service + traffic they receive on one of the Service's "externally-facing" addresses + (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to "Local", the + proxy will configure the service in a way that assumes that external + load balancers will take care of balancing the service traffic between + nodes, and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client source IP. 
+ (Traffic mistakenly sent to a node with no endpoints will be dropped.) + The default value, "Cluster", uses the standard behavior of routing + to all endpoints evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer IP from within + the cluster will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to take traffic policy + into account when picking a node. + displayName: Postgres Services Primary External Traffic Policy + path: postgresServices.primary.externalTrafficPolicy + - description: healthCheckNodePort specifies the healthcheck nodePort for + the service. This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, a value will + be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). This + field cannot be updated once set. + displayName: Postgres Services Primary Health Check Node Port + path: postgresServices.primary.healthCheckNodePort + - description: InternalTrafficPolicy describes how nodes distribute service + traffic they receive on the ClusterIP. If set to "Local", the proxy + will assume that pods only want to talk to endpoints of the service + on the same node as the pod, dropping the traffic if there are no local + endpoints. The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified by topology and + other features). + displayName: Postgres Services Primary Internal Traffic Policy + path: postgresServices.primary.internalTrafficPolicy + - displayName: Postgres Services Primary Ip Families + path: postgresServices.primary.ipFamilies + - description: IPFamilyPolicy represents the dual-stack-ness requested or + required by this Service. If there is no value provided, then this field + will be set to SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on dual-stack configured + clusters or a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). + The ipFamilies and clusterIPs fields depend on the value of this field. + This field will be wiped when updating a service to type ExternalName. + displayName: Postgres Services Primary Ip Family Policy + path: postgresServices.primary.ipFamilyPolicy + - description: loadBalancerClass is the class of the load balancer implementation + this Service belongs to. If specified, the value of this field must + be a label-style identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is used, today + this is typically done through the cloud provider integration, but should + apply for any default implementation. If set, it is assumed that a load + balancer implementation is watching for Services with a matching class. + Any default load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. 
This field can only be set when + creating or updating a Service to type 'LoadBalancer'. Once set, it + can not be changed. This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + displayName: Postgres Services Primary Load Balancer Class + path: postgresServices.primary.loadBalancerClass + - description: 'Only applies to Service Type: LoadBalancer. This feature + depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. This field will + be ignored if the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. Users are + encouraged to use implementation-specific annotations when available.' + displayName: Postgres Services Primary Load Balancer IP + path: postgresServices.primary.loadBalancerIP + - displayName: Postgres Services Primary Load Balancer Source Ranges + path: postgresServices.primary.loadBalancerSourceRanges + - description: publishNotReadyAddresses indicates that any agent which deals + with endpoints for this Service should disregard any indications of + ready/not-ready. The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes controllers that + generate Endpoints and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" even if the Pods + themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this + behavior. + displayName: Postgres Services Primary Publish Not Ready Addresses + path: postgresServices.primary.publishNotReadyAddresses + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Supports "ClientIP" and "None". Used to maintain session + affinity. Enable client IP based session affinity. Must be ClientIP + or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + displayName: Postgres Services Primary Session Affinity + path: postgresServices.primary.sessionAffinity + - description: timeoutSeconds specifies the seconds of ClientIP type session + sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + displayName: Postgres Services Primary Session Affinity Config Client + IP Timeout Seconds + path: postgresServices.primary.sessionAffinityConfig.clientIP.timeoutSeconds + - description: the node port that will be exposed to connect to Postgres + instance + displayName: Postgres Services Primary Node Ports Pgport + path: postgresServices.primary.nodePorts.pgport + - description: the node port that will be exposed to connect to Postgres + instance for replication purpose + displayName: Postgres Services Primary Node Ports Replicationport + path: postgresServices.primary.nodePorts.replicationport + - description: the node port that will be exposed to connect to Babelfish + instance using SQL Server wire-protocol and T-SQL + displayName: Postgres Services Primary Node Ports Babelfish + path: postgresServices.primary.nodePorts.babelfish + - description: Specify if the service should be created or not. 
+ displayName: Postgres Services Replicas Enabled + path: postgresServices.replicas.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'type determines how the Service is exposed. Defaults to + ClusterIP. Valid + + options are ClusterIP, NodePort, and LoadBalancer. "ClusterIP" allocates + + a cluster-internal IP address for load-balancing to endpoints. + + "NodePort" builds on ClusterIP and allocates a port on every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current cloud). + + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + displayName: Postgres Services Replicas Type + path: postgresServices.replicas.type + - description: allocateLoadBalancerNodePorts defines if NodePorts will be + automatically allocated for services with type LoadBalancer. Default + is "true". It may be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific NodePorts (by + specifying a value), those requests will be respected, regardless of + this field. This field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any other type. + displayName: Postgres Services Replicas Allocate Load Balancer Node Ports + path: postgresServices.replicas.allocateLoadBalancerNodePorts + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Postgres Services Replicas External I Ps + path: postgresServices.replicas.externalIPs + - description: externalTrafficPolicy describes how nodes distribute service + traffic they receive on one of the Service's "externally-facing" addresses + (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to "Local", the + proxy will configure the service in a way that assumes that external + load balancers will take care of balancing the service traffic between + nodes, and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client source IP. + (Traffic mistakenly sent to a node with no endpoints will be dropped.) + The default value, "Cluster", uses the standard behavior of routing + to all endpoints evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer IP from within + the cluster will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to take traffic policy + into account when picking a node. + displayName: Postgres Services Replicas External Traffic Policy + path: postgresServices.replicas.externalTrafficPolicy + - description: healthCheckNodePort specifies the healthcheck nodePort for + the service. This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, a value will + be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). This + field cannot be updated once set. 
+ displayName: Postgres Services Replicas Health Check Node Port + path: postgresServices.replicas.healthCheckNodePort + - description: InternalTrafficPolicy describes how nodes distribute service + traffic they receive on the ClusterIP. If set to "Local", the proxy + will assume that pods only want to talk to endpoints of the service + on the same node as the pod, dropping the traffic if there are no local + endpoints. The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified by topology and + other features). + displayName: Postgres Services Replicas Internal Traffic Policy + path: postgresServices.replicas.internalTrafficPolicy + - displayName: Postgres Services Replicas Ip Families + path: postgresServices.replicas.ipFamilies + - description: IPFamilyPolicy represents the dual-stack-ness requested or + required by this Service. If there is no value provided, then this field + will be set to SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on dual-stack configured + clusters or a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). + The ipFamilies and clusterIPs fields depend on the value of this field. + This field will be wiped when updating a service to type ExternalName. + displayName: Postgres Services Replicas Ip Family Policy + path: postgresServices.replicas.ipFamilyPolicy + - description: loadBalancerClass is the class of the load balancer implementation + this Service belongs to. If specified, the value of this field must + be a label-style identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is used, today + this is typically done through the cloud provider integration, but should + apply for any default implementation. If set, it is assumed that a load + balancer implementation is watching for Services with a matching class. + Any default load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only be set when + creating or updating a Service to type 'LoadBalancer'. Once set, it + can not be changed. This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + displayName: Postgres Services Replicas Load Balancer Class + path: postgresServices.replicas.loadBalancerClass + - description: 'Only applies to Service Type: LoadBalancer. This feature + depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. This field will + be ignored if the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. Users are + encouraged to use implementation-specific annotations when available.' + displayName: Postgres Services Replicas Load Balancer IP + path: postgresServices.replicas.loadBalancerIP + - displayName: Postgres Services Replicas Load Balancer Source Ranges + path: postgresServices.replicas.loadBalancerSourceRanges + - description: publishNotReadyAddresses indicates that any agent which deals + with endpoints for this Service should disregard any indications of + ready/not-ready. 
The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes controllers that + generate Endpoints and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" even if the Pods + themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this + behavior. + displayName: Postgres Services Replicas Publish Not Ready Addresses + path: postgresServices.replicas.publishNotReadyAddresses + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Supports "ClientIP" and "None". Used to maintain session + affinity. Enable client IP based session affinity. Must be ClientIP + or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + displayName: Postgres Services Replicas Session Affinity + path: postgresServices.replicas.sessionAffinity + - description: timeoutSeconds specifies the seconds of ClientIP type session + sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + displayName: Postgres Services Replicas Session Affinity Config Client + IP Timeout Seconds + path: postgresServices.replicas.sessionAffinityConfig.clientIP.timeoutSeconds + - description: the node port that will be exposed to connect to Postgres + instance + displayName: Postgres Services Replicas Node Ports Pgport + path: postgresServices.replicas.nodePorts.pgport + - description: the node port that will be exposed to connect to Postgres + instance for replication purpose + displayName: Postgres Services Replicas Node Ports Replicationport + path: postgresServices.replicas.nodePorts.replicationport + - description: the node port that will be exposed to connect to Babelfish + instance using SQL Server wire-protocol and T-SQL + displayName: Postgres Services Replicas Node Ports Babelfish + path: postgresServices.replicas.nodePorts.babelfish + - description: 'Size of the PersistentVolume set for each instance of the + cluster. This size is specified either in Mebibytes, Gibibytes or Tebibytes + (multiples of 2^20, 2^30 or 2^40, respectively). + + ' + displayName: Pods Persistent Volume Size + path: pods.persistentVolume.size + - description: 'Name of an existing StorageClass in the Kubernetes cluster, + used to create the PersistentVolumes for the instances of the cluster. + + ' + displayName: Pods Persistent Volume Storage Class + path: pods.persistentVolume.storageClass + - description: 'If set to `true`, avoids creating a connection pooling (using + [PgBouncer](https://www.pgbouncer.org/)) sidecar. + + + **Changing this field may require a restart.** + + ' + displayName: Pods Disable Connection Pooling + path: pods.disableConnectionPooling + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: '**Deprecated** use instead .spec.configurations.observability.disableMetrics. + + ' + displayName: Pods Disable Metrics Exporter + path: pods.disableMetricsExporter + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If set to `true`, avoids creating the `postgres-util` sidecar. + This sidecar contains usual Postgres administration utilities *that + are not present in the main (`patroni`) container*, like `psql`. 
Only
+ disable if you know what you are doing.
+
+
+ **Changing this field may require a restart.**
+
+ '
+ displayName: Pods Disable Postgres Util
+ path: pods.disablePostgresUtil
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'If set to `true`, avoids creating the `envoy` sidecar. This
+ sidecar is used as the edge proxy for the cluster''s Pods, providing
+ extra metrics to the monitoring layer.
+
+
+ **Changing this field may require a restart.**
+
+ '
+ displayName: Pods Disable Envoy
+ path: pods.disableEnvoy
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'When enabled, resource limits for containers other than the
+ patroni container will be set just like for the patroni container, as specified
+ in the SGInstanceProfile.
+
+
+ **Changing this field may require a restart.**
+
+ '
+ displayName: Pods Resources Enable Cluster Limits Requirements
+ path: pods.resources.enableClusterLimitsRequirements
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: "When set to `true` the resources requests values in fields\
+ \ `SGInstanceProfile.spec.requests.cpu` and `SGInstanceProfile.spec.requests.memory`\
+ \ will represent the resources\n requests of the patroni container and\
+ \ the total resources requests calculated by adding the resources requests\
+ \ of all the containers (including the patroni container).\n\n**Changing\
+ \ this field may require a restart.**\n"
+ displayName: Pods Resources Disable Resources Requests Split From Total
+ path: pods.resources.disableResourcesRequestsSplitFromTotal
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: "When set to `true` the reconciliation of the cluster will\
+ \ fail if `disableResourcesRequestsSplitFromTotal` is not set or set\
+ \ to `false` and the sum of the CPU or memory\n of all the containers\
+ \ except patroni is equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\
+ \ or `SGInstanceProfile.spec.requests.memory`.\n\nWhen `false` (the\
+ \ default) and `disableResourcesRequestsSplitFromTotal` is not set or\
+ \ set to `false` and the sum of the CPU or memory\n of all the containers\
+ \ except patroni is equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\
+ \ or `SGInstanceProfile.spec.requests.memory`\n then the patroni container\
+ \ resources will be set to 0.\n"
+ displayName: Pods Resources Fail When Total Is Higher
+ path: pods.resources.failWhenTotalIsHigher
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - displayName: Pods Scheduling Node Selector
+ path: pods.scheduling.nodeSelector
+ - description: The pod this Toleration is attached to tolerates any taint
+ that matches the triple using the matching operator
+ .
+ displayName: Pods Scheduling Tolerations
+ path: pods.scheduling.tolerations
+ - description: 'Node affinity is a group of node affinity scheduling rules.
+
+
+ See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core'
+ displayName: Pods Scheduling Node Affinity
+ path: pods.scheduling.nodeAffinity
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:nodeAffinity
+ - description: If specified, indicates the pod's priority. "system-node-critical"
+ and "system-cluster-critical" are two special keywords which indicate
+ the highest priorities with the former being the highest priority.
Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Pods Scheduling Priority Class Name + path: pods.scheduling.priorityClassName + - description: 'Pod affinity is a group of inter pod affinity scheduling + rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + displayName: Pods Scheduling Pod Affinity + path: pods.scheduling.podAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAffinity + - description: 'Pod anti affinity is a group of inter pod anti affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + displayName: Pods Scheduling Pod Anti Affinity + path: pods.scheduling.podAntiAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity + - description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + displayName: Pods Scheduling Topology Spread Constraints + path: pods.scheduling.topologySpreadConstraints + - displayName: Pods Scheduling Backup Node Selector + path: pods.scheduling.backup.nodeSelector + - description: The pod this Toleration is attached to tolerates any taint + that matches the triple using the matching operator + . + displayName: Pods Scheduling Backup Tolerations + path: pods.scheduling.backup.tolerations + - description: 'Node affinity is a group of node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + displayName: Pods Scheduling Backup Node Affinity + path: pods.scheduling.backup.nodeAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:nodeAffinity + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Pods Scheduling Backup Priority Class Name + path: pods.scheduling.backup.priorityClassName + - description: 'Pod affinity is a group of inter pod affinity scheduling + rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + displayName: Pods Scheduling Backup Pod Affinity + path: pods.scheduling.backup.podAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAffinity + - description: 'Pod anti affinity is a group of inter pod anti affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + displayName: Pods Scheduling Backup Pod Anti Affinity + path: pods.scheduling.backup.podAntiAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity + - description: "managementPolicy controls how pods are created during initial\ + \ scale up, when replacing pods\n on nodes, or when scaling down. The\ + \ default policy is `OrderedReady`, where pods are created\n in increasing\ + \ order (pod-0, then pod-1, etc) and the controller will wait until\ + \ each pod is\n ready before continuing. 
When scaling down, the pods\ + \ are removed in the opposite order.\n The alternative policy is `Parallel`\ + \ which will create pods in parallel to match the desired\n scale without\ + \ waiting, and on scale down will delete all pods at once.\n" + displayName: Pods Management Policy + path: pods.managementPolicy + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Pods Custom Volume Mounts Mount Path + path: pods.customVolumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Pods Custom Volume Mounts Mount Propagation + path: pods.customVolumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Pods Custom Volume Mounts Name + path: pods.customVolumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. + displayName: Pods Custom Volume Mounts Read Only + path: pods.customVolumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + displayName: Pods Custom Volume Mounts Sub Path + path: pods.customVolumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + displayName: Pods Custom Volume Mounts Sub Path Expr + path: pods.customVolumeMounts.subPathExpr + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Pods Custom Init Volume Mounts Mount Path + path: pods.customInitVolumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Pods Custom Init Volume Mounts Mount Propagation + path: pods.customInitVolumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Pods Custom Init Volume Mounts Name + path: pods.customInitVolumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. + displayName: Pods Custom Init Volume Mounts Read Only + path: pods.customInitVolumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + displayName: Pods Custom Init Volume Mounts Sub Path + path: pods.customInitVolumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. 
+ displayName: Pods Custom Init Volume Mounts Sub Path Expr + path: pods.customInitVolumeMounts.subPathExpr + - description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the cluster. + + + It must exist. When not set, a default Postgres config, for the major + version selected, is used. + + + **Changing this field may require a restart.** + + ' + displayName: Configurations SGPostgresConfig + path: configurations.sgPostgresConfig + - description: 'Name of the [SGPoolingConfig](https://stackgres.io/doc/latest/reference/crd/sgpoolconfig) + used for this cluster. + + + Each pod contains a sidecar with a connection pooler (currently: [PgBouncer](https://www.pgbouncer.org/)). + The connection pooler is implemented as a sidecar. + + + If not set, a default configuration will be used. Disabling connection + pooling altogether is possible if the disableConnectionPooling property + of the pods object is set to true. + + + **Changing this field may require a restart.** + + ' + displayName: Configurations SGPoolingConfig + path: configurations.sgPoolingConfig + - description: 'If set to `true`, avoids creating the Prometheus exporter + sidecar. Recommended when there''s no intention to use internal monitoring. + + + **Changing this field may require a restart.** + + ' + displayName: Configurations Observability Disable Metrics + path: configurations.observability.disableMetrics + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Indicate the receiver name (for type prometheus) in the configuration + for the collector scraper (if not specified the default empty name will + be used). + displayName: Configurations Observability Receiver + path: configurations.observability.receiver + - description: If set to `true`, a PodMonitor is created for each Prometheus + instance as specified in the SGConfig.spec.collector.prometheusOperator.monitors + section. + displayName: Configurations Observability Prometheus Autobind + path: configurations.observability.prometheusAutobind + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Specifies the backup compression algorithm. Possible options + are: lz4, lzma, brotli. The default method is `lz4`. LZ4 is the fastest + method, but compression ratio is the worst. LZMA is way slower, but + it compresses backups about 6 times better than LZ4. Brotli is a good + trade-off between speed and compression ratio, being about 3 times better + than LZ4. + + ' + displayName: Configurations Backups Compression + path: configurations.backups.compression + - description: 'Continuous Archiving backups are composed of periodic *base + backups* and all the WAL segments produced in between those base backups. + This parameter specifies at what time and with what frequency to start + performing a new base backup. + + + Use cron syntax (`m h dom mon dow`) for this parameter, i.e., 5 values + separated by spaces: + + * `m`: minute, 0 to 59. + + * `h`: hour, 0 to 23. + + * `dom`: day of month, 1 to 31 (recommended not to set it higher than + 28). + + * `mon`: month, 1 to 12. + + * `dow`: day of week, 0 to 7 (0 and 7 both represent Sunday). + + + Also ranges of values (`start-end`), the symbol `*` (meaning `first-last`) + or even `*/N`, where `N` is a number, meaning ""every `N`, may be used. + All times are UTC. It is recommended to avoid 00:00 as base backup time, + to avoid overlapping with any other external operations happening at + this time. 
+
+
+ If not set, full backups are never performed automatically.
+
+ '
+ displayName: Configurations Backups Cron Schedule
+ path: configurations.backups.cronSchedule
+ - description: 'Maximum storage upload bandwidth used when storing a backup.
+ In bytes (per second).
+
+ '
+ displayName: Configurations Backups Performance Max Network Bandwidth
+ path: configurations.backups.performance.maxNetworkBandwidth
+ - description: 'Maximum disk read I/O when performing a backup. In bytes
+ (per second).
+
+ '
+ displayName: Configurations Backups Performance Max Disk Bandwidth
+ path: configurations.backups.performance.maxDiskBandwidth
+ - description: 'Backup storage may use several concurrent streams to store
+ the data. This parameter configures the number of parallel streams to
+ use to read from disk. By default, it''s set to 1.
+
+ '
+ displayName: Configurations Backups Performance Upload Disk Concurrency
+ path: configurations.backups.performance.uploadDiskConcurrency
+ - description: 'Backup storage may use several concurrent streams to store
+ the data. This parameter configures the number of parallel streams to
+ use. By default, it''s set to 16.
+
+ '
+ displayName: Configurations Backups Performance Upload Concurrency
+ path: configurations.backups.performance.uploadConcurrency
+ - description: 'Backup storage may use several concurrent streams to read
+ the data. This parameter configures the number of parallel streams to
+ use. By default, it''s set to the minimum between the number of files
+ to read and 10.
+
+ '
+ displayName: Configurations Backups Performance Download Concurrency
+ path: configurations.backups.performance.downloadConcurrency
+ - description: 'When an automatic retention policy is defined to delete
+ old base backups, this parameter specifies the number of base backups
+ to keep, in a sliding window.
+
+
+ Consequently, the time range covered by backups is `periodicity*retention`,
+ where `periodicity` is the separation between backups as specified by
+ the `cronSchedule` property.
+
+
+ Default is 5.
+
+ '
+ displayName: Configurations Backups Retention
+ path: configurations.backups.retention
+ - description: 'Name of the [SGObjectStorage](https://stackgres.io/doc/latest/reference/crd/sgobjectstorage)
+ to use for the cluster.
+
+
+ It defines the location in which the backups will be stored.
+
+ '
+ displayName: Configurations Backups SGObjectStorage
+ path: configurations.backups.sgObjectStorage
+ - description: "The path where the backup is stored. If not set, this field\
+ \ is filled up by the operator.\n\nWhen provided it will indicate where\
+ \ the backups and WAL files will be stored.\n\n> **WARNING**: Most users\
+ \ should leave this field empty since having it manually set could be\
+ \ dangerous. If the value is repeated due to re-creating an SGCluster\
+ \ or\n re-using the same value in another SGCluster you may get\
+ \ a mixed WAL history with unrecoverable backups.\n"
+ displayName: Configurations Backups Path
+ path: configurations.backups.path
+ - description: 'If specified, SGBackup will use VolumeSnapshot to create
+ backups.
+
+
+ This functionality still requires storing WAL files in an SGObjectStorage
+ but could result in much faster backups and restore of those backups.
+
+
+ See also https://kubernetes.io/docs/concepts/storage/volume-snapshots/
+
+ '
+ displayName: Configurations Backups Use Volume Snapshot
+ path: configurations.backups.useVolumeSnapshot
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'The name of the VolumeSnapshotClass to use to create the
+ VolumeSnapshot for backups.
+
+
+ See also https://kubernetes.io/docs/concepts/storage/volume-snapshots/
+
+ '
+ displayName: Configurations Backups Volume Snapshot Class
+ path: configurations.backups.volumeSnapshotClass
+ - description: 'If specified, SGBackup will create a backup forcing a fast
+ start (by setting parameter `fast` to `true` when calling `pg_backup_start`)
+ that will reduce the time the backups may take at the expense of more
+ IO usage.
+
+
+ See also https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-LOWLEVEL-BASE-BACKUP
+
+ '
+ displayName: Configurations Backups Fast Volume Snapshot
+ path: configurations.backups.fastVolumeSnapshot
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'Allow to set a timeout for the backup creation.
+
+
+ If not set it will be disabled and the backup operation will continue
+ until the backup completes or fails. If set to 0 it is the same as not being
+ set.
+
+
+ Make sure to set a reasonably high value in order to allow for any unexpected
+ delays during backup creation (low network bandwidth, low disk throughput
+ and so forth).
+
+ '
+ displayName: Configurations Backups Timeout
+ path: configurations.backups.timeout
+ - description: "Allow to set a timeout for the reconciliation process that\
+ \ takes place after the backup.\n\nIf not set, it defaults to 300 (5 minutes).\
+ \ If set to 0 it will disable the timeout.\n\nFailure of reconciliation\
+ \ will not make the backup fail and it will be re-tried the next time an\
+ \ SGBackup\n or scheduled backup Job takes place.\n"
+ displayName: Configurations Backups Reconciliation Timeout
+ path: configurations.backups.reconciliationTimeout
+ - description: 'The maximum number of retries the backup operation is allowed
+ to do after a failure.
+
+
+ A value of `0` (zero) means no retries are made. Defaults to: `3`.
+
+ '
+ displayName: Configurations Backups Max Retries
+ path: configurations.backups.maxRetries
+ - description: 'If specified, WAL created after any unmanaged lifecycle
+ backups will be retained.
+
+ '
+ displayName: Configurations Backups Retain Wals For Unmanaged Lifecycle
+ path: configurations.backups.retainWalsForUnmanagedLifecycle
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names).
+ displayName: Configurations Credentials Patroni Rest Api Password Name
+ path: configurations.credentials.patroni.restApiPassword.name
+ - description: The key of the secret to select from. Must be a valid secret
+ key.
+ displayName: Configurations Credentials Patroni Rest Api Password Key
+ path: configurations.credentials.patroni.restApiPassword.key
+ - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names).
+ displayName: Configurations Credentials Users Superuser Username Name
+ path: configurations.credentials.users.superuser.username.name
+ - description: The key of the secret to select from. Must be a valid secret
+ key.
+ displayName: Configurations Credentials Users Superuser Username Key + path: configurations.credentials.users.superuser.username.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Superuser Password Name + path: configurations.credentials.users.superuser.password.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Superuser Password Key + path: configurations.credentials.users.superuser.password.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Replication Username Name + path: configurations.credentials.users.replication.username.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Replication Username Key + path: configurations.credentials.users.replication.username.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Replication Password Name + path: configurations.credentials.users.replication.password.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Replication Password Key + path: configurations.credentials.users.replication.password.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Authenticator Username Name + path: configurations.credentials.users.authenticator.username.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Authenticator Username Key + path: configurations.credentials.users.authenticator.username.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Authenticator Password Name + path: configurations.credentials.users.authenticator.password.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Authenticator Password Key + path: configurations.credentials.users.authenticator.password.key + - description: It's the reference of custom provider name. If not specified, + then the default value will be `stackgres` + displayName: Configurations Binding Provider + path: configurations.binding.provider + - description: Allow to specify the database name. If not specified, then + the default value is `postgres` + displayName: Configurations Binding Database + path: configurations.binding.database + - description: Allow to specify the username. If not specified, then the + superuser username will be used. 
+ displayName: Configurations Binding Username
+ path: configurations.binding.username
+ - description: The name of the Secret
+ displayName: Configurations Binding Password Name
+ path: configurations.binding.password.name
+ - description: The key of the Secret
+ displayName: Configurations Binding Password Key
+ path: configurations.binding.password.key
+ - description: If true, when any entry of any `SGScript` fails it will not prevent
+ subsequent `SGScript` entries from being executed. By default it is `false`.
+ displayName: Managed Sql Continue On SG Script Error
+ path: managedSql.continueOnSGScriptError
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'The id is immutable and must be unique across all the `SGScript`
+ entries. It is replaced by the operator and is used to identify the
+ `SGScript` entry.
+
+ '
+ displayName: Managed Sql Scripts Id
+ path: managedSql.scripts.id
+ - description: A reference to an `SGScript`
+ displayName: Managed Sql Scripts SGScript
+ path: managedSql.scripts.sgScript
+ - description: "When set to the UID of an existing [SGBackup](https://stackgres.io/doc/latest/reference/crd/sgbackup),\
+ \ the cluster is initialized by restoring the\n backup data to it.\
+ \ If not set, the cluster is initialized empty. This field is deprecated.\n"
+ displayName: Initial Data Restore From Backup Uid
+ path: initialData.restore.fromBackup.uid
+ - description: "When set to the name of an existing [SGBackup](https://stackgres.io/doc/latest/reference/crd/sgbackup),\
+ \ the cluster is initialized by restoring the\n backup data to it.\
+ \ If not set, the cluster is initialized empty. The selected backup\
+ \ must be in the same namespace.\n"
+ displayName: Initial Data Restore From Backup Name
+ path: initialData.restore.fromBackup.name
+ - description: "Specify the [recovery_target](https://postgresqlco.nf/doc/en/param/recovery_target/)\
+ \ that specifies that recovery should end as soon as a consistent\n\
+ \ state is reached, i.e., as early as possible. When restoring from\
+ \ an online backup, this means the point where taking the backup ended.\n\
+ \n Technically, this is a string parameter, but 'immediate' is currently\
+ \ the only allowed value.\n"
+ displayName: Initial Data Restore From Backup Target
+ path: initialData.restore.fromBackup.target
+ - description: "Specify the [recovery_target_timeline](https://postgresqlco.nf/doc/en/param/recovery_target_timeline/)\
+ \ to recover into a particular timeline.\n The default is to recover\
+ \ along the same timeline that was current when the base backup was\
+ \ taken. Setting this to latest recovers to the latest\n timeline found\
+ \ in the archive, which is useful in a standby server. Other than that\
+ \ you only need to set this parameter in complex re-recovery\n situations,\
+ \ where you need to return to a state that itself was reached after\
+ \ a point-in-time recovery.\n"
+ displayName: Initial Data Restore From Backup Target Timeline
+ path: initialData.restore.fromBackup.targetTimeline
+ - description: "Specify the [recovery_target_inclusive](https://postgresqlco.nf/doc/en/param/recovery_target_timeline/)\
+ \ to stop recovery just after the specified\n recovery target (true),\
+ \ or just before the recovery target (false). Applies when targetLsn,\
+ \ pointInTimeRecovery, or targetXid is specified.
This\n setting controls\ + \ whether transactions having exactly the target WAL location (LSN),\ + \ commit time, or transaction ID, respectively, will be included\n \ + \ in the recovery. Default is true.\n" + displayName: Initial Data Restore From Backup Target Inclusive + path: initialData.restore.fromBackup.targetInclusive + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "[recovery_target_name](https://postgresqlco.nf/doc/en/param/recovery_target_name/)\ + \ specifies the named restore point\n (created with pg_create_restore_point())\ + \ to which recovery will proceed.\n" + displayName: Initial Data Restore From Backup Target Name + path: initialData.restore.fromBackup.targetName + - description: "[recovery_target_xid](https://postgresqlco.nf/doc/en/param/recovery_target_xid/)\ + \ specifies the transaction ID up to which recovery will proceed.\n\ + \ Keep in mind that while transaction IDs are assigned sequentially\ + \ at transaction start, transactions can complete in a different numeric\ + \ order.\n The transactions that will be recovered are those that committed\ + \ before (and optionally including) the specified one. The precise stopping\ + \ point\n is also influenced by targetInclusive.\n" + displayName: Initial Data Restore From Backup Target Xid + path: initialData.restore.fromBackup.targetXid + - description: "[recovery_target_lsn](https://postgresqlco.nf/doc/en/param/recovery_target_lsn/)\ + \ specifies the LSN of the write-ahead log location up to which\n recovery\ + \ will proceed. The precise stopping point is also influenced by targetInclusive.\ + \ This parameter is parsed using the system data type\n pg_lsn.\n" + displayName: Initial Data Restore From Backup Target Lsn + path: initialData.restore.fromBackup.targetLsn + - description: 'An ISO 8601 date, that holds UTC date indicating at which + point-in-time the database have to be restored. + + ' + displayName: Initial Data Restore From Backup Point In Time Recovery Restore + To Timestamp + path: initialData.restore.fromBackup.pointInTimeRecovery.restoreToTimestamp + - description: 'The backup fetch process may fetch several streams in parallel. + Parallel fetching is enabled when set to a value larger than one. + + + If not specified it will be interpreted as latest. + + ' + displayName: Initial Data Restore Download Disk Concurrency + path: initialData.restore.downloadDiskConcurrency + - description: 'Name of the script. Must be unique across this SGCluster. + + ' + displayName: Initial Data Scripts Name + path: initialData.scripts.name + - description: 'Database where the script is executed. Defaults to the `postgres` + database, if not specified. + + ' + displayName: Initial Data Scripts Database + path: initialData.scripts.database + - description: 'Raw SQL script to execute. This field is mutually exclusive + with `scriptFrom` field. + + ' + displayName: Initial Data Scripts Script + path: initialData.scripts.script + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Initial Data Scripts Script From Secret Key Ref Name + path: initialData.scripts.scriptFrom.secretKeyRef.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Initial Data Scripts Script From Secret Key Ref Key + path: initialData.scripts.scriptFrom.secretKeyRef.key + - description: 'The name of the ConfigMap that contains the SQL script to + execute. 
+ + ' + displayName: Initial Data Scripts Script From Config Map Key Ref Name + path: initialData.scripts.scriptFrom.configMapKeyRef.name + - description: 'The key name within the ConfigMap that contains the SQL + script to execute. + + ' + displayName: Initial Data Scripts Script From Config Map Key Ref Key + path: initialData.scripts.scriptFrom.configMapKeyRef.key + - description: 'Configure replication from an SGCluster. + + ' + displayName: Replicate From Instance SGCluster Reference + path: replicateFrom.instance.sgCluster + - description: The host of the PostgreSQL to replicate from. + displayName: Replicate From Instance External Host + path: replicateFrom.instance.external.host + - description: The port of the PostgreSQL to replicate from. + displayName: Replicate From Instance External Port + path: replicateFrom.instance.external.port + - description: 'Maximum storage upload bandwidth used when storing a backup. + In bytes (per second). + + ' + displayName: Replicate From Storage Performance Max Network Bandwidth + path: replicateFrom.storage.performance.maxNetworkBandwidth + - description: 'Maximum disk read I/O when performing a backup. In bytes + (per second). + + ' + displayName: Replicate From Storage Performance Max Disk Bandwidth + path: replicateFrom.storage.performance.maxDiskBandwidth + - description: 'Backup storage may use several concurrent streams to read + the data. This parameter configures the number of parallel streams to + use. By default, it''s set to the minimum between the number of file + to read and 10. + + ' + displayName: Replicate From Storage Performance Download Concurrency + path: replicateFrom.storage.performance.downloadConcurrency + - description: The SGObjectStorage name to replicate from. + displayName: Replicate From Storage SGObjectStorage + path: replicateFrom.storage.sgObjectStorage + - description: The path in the SGObjectStorage to replicate from. + displayName: Replicate From Storage Path + path: replicateFrom.storage.path + - description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + displayName: Replicate From Users Superuser Username + path: replicateFrom.users.superuser.username + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Replicate From Users Superuser Username Name + path: replicateFrom.users.superuser.username.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Replicate From Users Superuser Username Key + path: replicateFrom.users.superuser.username.key + - description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + displayName: Replicate From Users Superuser Password + path: replicateFrom.users.superuser.password + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). 
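+    # Editor's note: the descriptors above cover `initialData.restore.fromBackup` and
+    # `initialData.scripts`. The commented fragment below is an illustrative sketch only
+    # (kept as a YAML comment so the manifest itself is unchanged); the field names come
+    # from the descriptors, while the backup name, timestamp, database and SQL are
+    # hypothetical values.
+    #
+    # spec:
+    #   initialData:
+    #     restore:
+    #       fromBackup:
+    #         name: demo-backup-2024-01-01              # hypothetical SGBackup, same namespace
+    #         pointInTimeRecovery:
+    #           restoreToTimestamp: "2024-01-01T12:00:00Z"   # ISO 8601, UTC
+    #     scripts:
+    #     - name: create-app-db                         # unique within this SGCluster
+    #       database: postgres
+    #       script: CREATE DATABASE app;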
+ displayName: Replicate From Users Superuser Password Name + path: replicateFrom.users.superuser.password.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Replicate From Users Superuser Password Key + path: replicateFrom.users.superuser.password.key + - description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + displayName: Replicate From Users Replication Username + path: replicateFrom.users.replication.username + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Replicate From Users Replication Username Name + path: replicateFrom.users.replication.username.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Replicate From Users Replication Username Key + path: replicateFrom.users.replication.username.key + - description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + displayName: Replicate From Users Replication Password + path: replicateFrom.users.replication.password + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Replicate From Users Replication Password Name + path: replicateFrom.users.replication.password.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Replicate From Users Replication Password Key + path: replicateFrom.users.replication.password.key + - description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + displayName: Replicate From Users Authenticator Username + path: replicateFrom.users.authenticator.username + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Replicate From Users Authenticator Username Name + path: replicateFrom.users.authenticator.username.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Replicate From Users Authenticator Username Key + path: replicateFrom.users.authenticator.username.key + - description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + displayName: Replicate From Users Authenticator Password + path: replicateFrom.users.authenticator.password + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: Name of the referent. 
[More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Replicate From Users Authenticator Password Name + path: replicateFrom.users.authenticator.password.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Replicate From Users Authenticator Password Key + path: replicateFrom.users.authenticator.password.key + - description: '**Deprecated** use instead .spec.configurations.observability.prometheusAutobind. + + ' + displayName: Prometheus Autobind + path: prometheusAutobind + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'It is a best practice, on non-containerized environments, + when running production workloads, to run each database server on a + different server (virtual or physical), i.e., not to co-locate more + than one database server per host. + + + The same best practice applies to databases on containers. By default, + StackGres will not allow to run more than one StackGres pod on a given + Kubernetes node. Set this property to true to allow more than one StackGres + pod per node. + + + This property default value may be changed depending on the value of + field `.spec.profile`. + + + **Changing this field may require a restart.** + + ' + displayName: Non Production Options Disable Cluster Pod Anti Affinity + path: nonProductionOptions.disableClusterPodAntiAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'It is a best practice, on containerized environments, when + running production workloads, to enforce container''s resources requirements. + + + The same best practice applies to databases on containers. By default, + StackGres will configure resource requirements for patroni container. + Set this property to true to prevent StackGres from setting patroni + container''s resources requirement. + + + This property default value may be changed depending on the value of + field `.spec.profile`. + + + **Changing this field may require a restart.** + + ' + displayName: Non Production Options Disable Patroni Resource Requirements + path: nonProductionOptions.disablePatroniResourceRequirements + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'It is a best practice, on containerized environments, when + running production workloads, to enforce container''s resources requirements. + + + By default, StackGres will configure resource requirements for all the + containers. Set this property to true to prevent StackGres from setting + container''s resources requirements (except for patroni container, see + `disablePatroniResourceRequirements`). + + + This property default value may be changed depending on the value of + field `.spec.profile`. 
+ + + **Changing this field may require a restart.** + + ' + displayName: Non Production Options Disable Cluster Resource Requirements + path: nonProductionOptions.disableClusterResourceRequirements + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "**Deprecated** this value is ignored and you can consider\ + \ it as always `true`.\n\nOn containerized environments, when running\ + \ production workloads, enforcing container's cpu requirements request\ + \ to be equals to the limit allow to achieve the highest level of performance.\ + \ Doing so, reduces the chances of leaving\n the workload with less\ + \ cpu than it requires. It also allow to set [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs on the node.\n\ + \nBy default, StackGres will configure cpu requirements to have the\ + \ same limit and request for the patroni container. Set this property\ + \ to true to prevent StackGres from setting patroni container's cpu\ + \ requirements request equals to the limit\n when `.spec.requests.cpu`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n**Changing\ + \ this field may require a restart.**\n" + displayName: Non Production Options Enable Set Patroni Cpu Requests + path: nonProductionOptions.enableSetPatroniCpuRequests + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "**Deprecated** this value is ignored and you can consider\ + \ it as always `true`.\n\nOn containerized environments, when running\ + \ production workloads, enforcing container's cpu requirements request\ + \ to be equals to the limit allow to achieve the highest level of performance.\ + \ Doing so, reduces the chances of leaving\n the workload with less\ + \ cpu than it requires. It also allow to set [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs on the node.\n\ + \nBy default, StackGres will configure cpu requirements to have the\ + \ same limit and request for all the containers. 
Set this property to\ + \ true to prevent StackGres from setting container's cpu requirements\ + \ request equals to the limit (except for patroni container, see `enablePatroniCpuRequests`)\n\ + \ when `.spec.requests.containers..cpu` `.spec.requests.initContainers..cpu` is configured in the referenced `SGInstanceProfile`.\n\n\ + **Changing this field may require a restart.**\n" + displayName: Non Production Options Enable Set Cluster Cpu Requests + path: nonProductionOptions.enableSetClusterCpuRequests + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "**Deprecated** this value is ignored and you can consider\ + \ it as always `true`.\n\nOn containerized environments, when running\ + \ production workloads, enforcing container's memory requirements request\ + \ to be equals to the limit allow to achieve the highest level of performance.\ + \ Doing so, reduces the chances of leaving\n the workload with less\ + \ memory than it requires.\n\nBy default, StackGres will configure memory\ + \ requirements to have the same limit and request for the patroni container.\ + \ Set this property to true to prevent StackGres from setting patroni\ + \ container's memory requirements request equals to the limit\n when\ + \ `.spec.requests.memory` is configured in the referenced `SGInstanceProfile`.\n\ + \n**Changing this field may require a restart.**\n" + displayName: Non Production Options Enable Set Patroni Memory Requests + path: nonProductionOptions.enableSetPatroniMemoryRequests + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "**Deprecated** this value is ignored and you can consider\ + \ it as always `true`.\n\nOn containerized environments, when running\ + \ production workloads, enforcing container's memory requirements request\ + \ to be equals to the limit allow to achieve the highest level of performance.\ + \ Doing so, reduces the chances of leaving\n the workload with less\ + \ memory than it requires.\n\nBy default, StackGres will configure memory\ + \ requirements to have the same limit and request for all the containers.\ + \ Set this property to true to prevent StackGres from setting container's\ + \ memory requirements request equals to the limit (except for patroni\ + \ container, see `enablePatroniCpuRequests`)\n when `.spec.requests.containers..memory` `.spec.requests.initContainers..memory`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n**Changing\ + \ this field may require a restart.**\n" + displayName: Non Production Options Enable Set Cluster Memory Requests + path: nonProductionOptions.enableSetClusterMemoryRequests + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The name of the fature gate to enable. + displayName: Non Production Options Enabled Feature Gates + path: nonProductionOptions.enabledFeatureGates + - description: 'Name of the [SGDistributedLogs](https://stackgres.io/doc/latest/reference/crd/sgdistributedlogs/) + to use for this cluster. It must exist. 
+ + ' + displayName: SGDistributedLogs Reference + path: distributedLogs.sgDistributedLogs + - description: "Define a retention window with the syntax ` (minutes|hours|days|months)`\ + \ in which log entries are kept.\n Log entries will be removed when\ + \ they get older more than the double of the specified retention window.\n\ + \nWhen this field is changed the retention will be applied only to log\ + \ entries that are newer than the end of\n the retention window previously\ + \ specified. If no retention window was previously specified it is considered\n\ + \ to be of 7 days. This means that if previous retention window is\ + \ of `7 days` new retention configuration will\n apply after UTC timestamp\ + \ calculated with: `SELECT date_trunc('days', now() at time zone 'UTC')\ + \ - INTERVAL '7 days'`.\n" + displayName: Distributed Logs Retention + path: distributedLogs.retention + - description: The name of the extension to install. + displayName: To Install Postgres Extensions Name + path: toInstallPostgresExtensions.name + - description: The id of the publisher of the extension to install. + displayName: To Install Postgres Extensions Publisher + path: toInstallPostgresExtensions.publisher + - description: The version of the extension to install. + displayName: To Install Postgres Extensions Version + path: toInstallPostgresExtensions.version + - description: The repository base URL from where the extension will be + installed from. + displayName: To Install Postgres Extensions Repository + path: toInstallPostgresExtensions.repository + - description: The postgres major version of the extension to install. + displayName: To Install Postgres Extensions Postgres Version + path: toInstallPostgresExtensions.postgresVersion + - description: The build version of the extension to install. + displayName: To Install Postgres Extensions Build + path: toInstallPostgresExtensions.build + - description: The extra mount of the installed extension. + displayName: To Install Postgres Extensions Extra Mounts + path: toInstallPostgresExtensions.extraMounts + statusDescriptors: + - description: Actual number of instances for the StackGres cluster. Each + instance is a Pod containing one Postgres server. + displayName: Instances + path: instances + - description: Actual label selector for instances for the StackGres cluster's + Pods to be used by autoscaling. + displayName: Label Selector + path: labelSelector + - description: Indicates the latest failed backup for the replication initialization. + displayName: Replication Initialization Failed SG Backup + path: replicationInitializationFailedSGBackup + - displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + - description: Last time the condition transitioned from one status to another. + displayName: Conditions Last Transition Time + path: conditions.lastTransitionTime + - description: A human readable message indicating details about the transition. + displayName: Conditions Message + path: conditions.message + - description: The reason for the condition's last transition. + displayName: Conditions Reason + path: conditions.reason + - description: Status of the condition, one of True, False, Unknown. + displayName: Conditions Status + path: conditions.status + - description: Type of deployment condition. + displayName: Conditions Type + path: conditions.type + - description: The name of the pod. 
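+    # Editor's note: illustrative (commented) SGCluster spec fragment combining the
+    # `nonProductionOptions` and `distributedLogs` fields described above; the
+    # SGDistributedLogs name and the retention window are hypothetical values.
+    #
+    # spec:
+    #   nonProductionOptions:
+    #     disableClusterPodAntiAffinity: true    # allow more than one StackGres pod per node
+    #     disablePatroniResourceRequirements: true
+    #   distributedLogs:
+    #     sgDistributedLogs: distributedlogs     # hypothetical, must already exist
+    #     retention: 7 days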
+ displayName: Pod Statuses Name + path: podStatuses.name + - description: Indicates the replication group this Pod belongs to. + displayName: Pod Statuses Replication Group + path: podStatuses.replicationGroup + - description: Indicates if the pod is the elected primary + displayName: Pod Statuses Primary + path: podStatuses.primary + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Indicates if the pod requires restart + displayName: Pod Statuses Pending Restart + path: podStatuses.pendingRestart + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The name of the installed extension. + displayName: Pod Statuses Installed Postgres Extensions Name + path: podStatuses.installedPostgresExtensions.name + - description: The id of the publisher of the installed extension. + displayName: Pod Statuses Installed Postgres Extensions Publisher + path: podStatuses.installedPostgresExtensions.publisher + - description: The version of the installed extension. + displayName: Pod Statuses Installed Postgres Extensions Version + path: podStatuses.installedPostgresExtensions.version + - description: The repository base URL from where the extension was installed + from. + displayName: Pod Statuses Installed Postgres Extensions Repository + path: podStatuses.installedPostgresExtensions.repository + - description: The postgres major version of the installed extension. + displayName: Pod Statuses Installed Postgres Extensions Postgres Version + path: podStatuses.installedPostgresExtensions.postgresVersion + - description: The build version of the installed extension. + displayName: Pod Statuses Installed Postgres Extensions Build + path: podStatuses.installedPostgresExtensions.build + - description: The extra mount of the installed extension. + displayName: Pod Statuses Installed Postgres Extensions Extra Mounts + path: podStatuses.installedPostgresExtensions.extraMounts + - displayName: Db Ops Major Version Upgrade Initial Instances + path: dbOps.majorVersionUpgrade.initialInstances + - description: 'The primary instance that this operation is targetting + + ' + displayName: Db Ops Major Version Upgrade Primary Instance + path: dbOps.majorVersionUpgrade.primaryInstance + - description: 'The source PostgreSQL version + + ' + displayName: Db Ops Major Version Upgrade Source Postgres Version + path: dbOps.majorVersionUpgrade.sourcePostgresVersion + - description: The name of the extension to deploy. + displayName: Db Ops Major Version Upgrade Source Postgres Extensions Name + path: dbOps.majorVersionUpgrade.sourcePostgresExtensions.name + - description: The id of the publisher of the extension to deploy. If not + specified `com.ongres` will be used by default. + displayName: Db Ops Major Version Upgrade Source Postgres Extensions Publisher + path: dbOps.majorVersionUpgrade.sourcePostgresExtensions.publisher + - description: The version of the extension to deploy. If not specified + version of `stable` channel will be used by default and if only a version + is available that one will be used. + displayName: Db Ops Major Version Upgrade Source Postgres Extensions Version + path: dbOps.majorVersionUpgrade.sourcePostgresExtensions.version + - description: 'The repository base URL from where to obtain the extension + to deploy. 
+ + ' + displayName: Db Ops Major Version Upgrade Source Postgres Extensions Repository + path: dbOps.majorVersionUpgrade.sourcePostgresExtensions.repository + - description: 'The source SGPostgresConfig reference + + ' + displayName: Db Ops Major Version Upgrade Source SGPostgresConfig + path: dbOps.majorVersionUpgrade.sourceSgPostgresConfig + - description: 'The source backup path + + ' + displayName: Db Ops Major Version Upgrade Source Backup Path + path: dbOps.majorVersionUpgrade.sourceBackupPath + - description: 'The target PostgreSQL version + + ' + displayName: Db Ops Major Version Upgrade Target Postgres Version + path: dbOps.majorVersionUpgrade.targetPostgresVersion + - description: 'The PostgreSQL locale + + ' + displayName: Db Ops Major Version Upgrade Locale + path: dbOps.majorVersionUpgrade.locale + - description: 'The PostgreSQL encoding + + ' + displayName: Db Ops Major Version Upgrade Encoding + path: dbOps.majorVersionUpgrade.encoding + - description: 'Indicates if PostgreSQL data checksum is enabled + + ' + displayName: Db Ops Major Version Upgrade Data Checksum + path: dbOps.majorVersionUpgrade.dataChecksum + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Use `--link` option when running `pg_upgrade` + + ' + displayName: Db Ops Major Version Upgrade Link + path: dbOps.majorVersionUpgrade.link + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Use `--clone` option when running `pg_upgrade` + + ' + displayName: Db Ops Major Version Upgrade Clone + path: dbOps.majorVersionUpgrade.clone + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Run `pg_upgrade` with check option instead of performing + the real upgrade + + ' + displayName: Db Ops Major Version Upgrade Check + path: dbOps.majorVersionUpgrade.check + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Indicates to rollback from a previous major version upgrade + + ' + displayName: Db Ops Major Version Upgrade Rollback + path: dbOps.majorVersionUpgrade.rollback + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Db Ops Restart Initial Instances + path: dbOps.restart.initialInstances + - description: 'The primary instance that this operation is targetting + + ' + displayName: Db Ops Restart Primary Instance + path: dbOps.restart.primaryInstance + - displayName: Db Ops Minor Version Upgrade Initial Instances + path: dbOps.minorVersionUpgrade.initialInstances + - description: 'The primary instance that this operation is targetting + + ' + displayName: Db Ops Minor Version Upgrade Primary Instance + path: dbOps.minorVersionUpgrade.primaryInstance + - description: 'Postgres version that is currently running on the cluster + + ' + displayName: Db Ops Minor Version Upgrade Source Postgres Version + path: dbOps.minorVersionUpgrade.sourcePostgresVersion + - description: 'The desired Postgres version for the cluster + + ' + displayName: Db Ops Minor Version Upgrade Target Postgres Version + path: dbOps.minorVersionUpgrade.targetPostgresVersion + - displayName: Db Ops Security Upgrade Initial Instances + path: dbOps.securityUpgrade.initialInstances + - description: 'The primary instance that this operation is targetting + + ' + displayName: Db Ops Security Upgrade Primary Instance + path: dbOps.securityUpgrade.primaryInstance + - description: The architecture on which the cluster has been initialized. 
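+    # Editor's note: the entries above are statusDescriptors, i.e. fields written by the
+    # operator rather than by the user. A commented sketch of how such a status fragment
+    # might look for a major version upgrade (all values are hypothetical):
+    #
+    # status:
+    #   dbOps:
+    #     majorVersionUpgrade:
+    #       initialInstances:
+    #       - demo-0
+    #       - demo-1
+    #       primaryInstance: demo-0
+    #       sourcePostgresVersion: "15.6"
+    #       targetPostgresVersion: "16.2"
+    #       link: true
+    #       check: false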
+    displayName: Arch
+    path: arch
+  - description: The operating system on which the cluster has been initialized.
+    displayName: Os
+    path: os
+  - description: The custom prefix that is prepended to all labels.
+    displayName: Label Prefix
+    path: labelPrefix
+  - description: Identify the associated `SGScript` entry with the same value
+      in the `id` field.
+    displayName: Managed Sql Scripts Id
+    path: managedSql.scripts.id
+  - description: ISO-8601 datetime of when the script execution has been started.
+    displayName: Managed Sql Scripts Started At
+    path: managedSql.scripts.startedAt
+  - description: ISO-8601 datetime of when the last script execution occurred.
+      Will be reset each time the referenced `SGScripts` entry is applied.
+    displayName: Managed Sql Scripts Updated At
+    path: managedSql.scripts.updatedAt
+  - description: ISO-8601 datetime of when the script execution had failed
+      (mutually exclusive with `completedAt`).
+    displayName: Managed Sql Scripts Failed At
+    path: managedSql.scripts.failedAt
+  - description: ISO-8601 datetime of when the script execution had completed
+      (mutually exclusive with `failedAt`).
+    displayName: Managed Sql Scripts Completed At
+    path: managedSql.scripts.completedAt
+  - description: Identify the associated script entry with the same value
+      in the `id` field.
+    displayName: Managed Sql Scripts Scripts Id
+    path: managedSql.scripts.scripts.id
+  - description: The latest version applied
+    displayName: Managed Sql Scripts Scripts Version
+    path: managedSql.scripts.scripts.version
+  - description: Indicates the number of intents (attempts) or failures that occurred
+    displayName: Managed Sql Scripts Scripts Intents
+    path: managedSql.scripts.scripts.intents
+  - description: If failed, the error code of the failure. See also https://www.postgresql.org/docs/current/errcodes-appendix.html
+    displayName: Managed Sql Scripts Scripts Failure Code
+    path: managedSql.scripts.scripts.failureCode
+  - description: If failed, a message of the failure
+    displayName: Managed Sql Scripts Scripts Failure
+    path: managedSql.scripts.scripts.failure
+  - description: The name of the Secret as specified in [Service Binding spec
+      for provisioned service](https://servicebinding.io/spec/core/1.0.0/#provisioned-service).
+    displayName: Binding Name
+    path: binding.name
+  version: v1
+- description: Operator configuration for OLM-based installations (equivalent
+    to values.yaml with Helm)
+  displayName: StackGres Operator Configuration
+  kind: SGConfig
+  name: sgconfigs.stackgres.io
+  specDescriptors:
+  - description: 'The container registry host (and port) where the images
+      will be pulled from.
+
+
+      > This value can only be set in operator helm chart or with the environment
+      variable `SG_CONTAINER_REGISTRY`.
+
+      '
+    displayName: Container Registry
+    path: containerRegistry
+  - description: Image pull policy used for images loaded by the Operator
+    displayName: Image Pull Policy
+    path: imagePullPolicy
+  - description: The name of the referenced Secret.
+    displayName: Image Pull Secrets Name
+    path: imagePullSecrets.name
+  - description: 'A namespace that the operator is allowed to use.
+ + ' + displayName: Allowed Namespaces + path: allowedNamespaces + - displayName: Allowed Namespace Label Selector + path: allowedNamespaceLabelSelector + - description: "When set to `true` the creation of the operator ClusterRole\ + \ and ClusterRoleBinding is disabled.\n Also, when `true`, some features\ + \ that rely on unnamespaced resources premissions will be disabled:\n\ + \n* Creation and upgrade of CustomResourceDefinitions\n* Set CA bundle\ + \ for Webhooks\n* Check existence of CustomResourceDefinition when listing\ + \ custom resources\n* Validation of StorageClass\n* REST API endpoint\ + \ `can-i/{verb}/{resource}` and `can-i` will always return the full\ + \ list of permissions for any resource and verb since they rely on creation\ + \ of subjectaccessreviews unnamespaced resource that requires a cluster\ + \ role.\n* Other REST API endpoints will not work since they rely on\ + \ impersonation that requires a cluster role.\n This point in particular\ + \ breaks the Web Console completely. You may still enable this specific\ + \ cluster role with `.allowImpersonationForRestApi`.\n If you do not\ + \ need the Web Console you may still disable it completely by setting\ + \ `.deploy.restapi` to `false`.\n\nWhen set to `true` and `allowedNamespaces`\ + \ is not set or is empty then `allowedNamespaces` will be considered\ + \ set and containing only the namespace of the operator.\n\nIt is `false`\ + \ by default.\n\n> This value can only be set in operator helm chart.\n" + displayName: Disable Cluster Role + path: disableClusterRole + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When set to `true` the cluster role for impersonation will + be created even if `disableClusterRole` is set to `true`. + + + It is `false` by default. + + + > This value can only be set in operator helm chart. + + ' + displayName: Allow Impersonation For Rest Api + path: allowImpersonationForRestApi + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When set to `true` the cluster role to update or patch CRDs + will be disabled. + + + It is `false` by default. + + + > This value can only be set in operator helm chart. + + ' + displayName: Disable Crds And Webhooks Update + path: disableCrdsAndWebhooksUpdate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When set will indicate the namespace where the SGConfig + used by the operator will be created. + + + By default the SGConfig will be created in the same namespace as the + operator. + + + > This value can only be set in operator helm chart. + + ' + displayName: SGConfig Namespace + path: sgConfigNamespace + - description: 'If `true` the Operator Installation ServiceAccount will + be created + + + > This value can only be set in operator helm chart. + + ' + displayName: Service Account Create + path: serviceAccount.create + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Repository credentials Secret name + + + > This value can only be set in operator helm chart. + + ' + displayName: Service Account Repo Credentials + path: serviceAccount.repoCredentials + - description: 'Operator image name + + + > This value can only be set in operator helm chart. + + ' + displayName: Operator Image Name + path: operator.image.name + - description: 'Operator image tag + + + > This value can only be set in operator helm chart. 
+ + ' + displayName: Operator Image Tag + path: operator.image.tag + - description: 'Operator image pull policy + + + > This value can only be set in operator helm chart. + + ' + displayName: Operator Image Pull Policy + path: operator.image.pullPolicy + - displayName: Operator Service Account Repo Credentials + path: operator.serviceAccount.repoCredentials + - description: REST API Deployment name + displayName: Restapi Name + path: restapi.name + - description: REST API image name + displayName: Restapi Image Name + path: restapi.image.name + - description: REST API image tag + displayName: Restapi Image Tag + path: restapi.image.tag + - description: REST API image pull policy + displayName: Restapi Image Pull Policy + path: restapi.image.pullPolicy + - description: Repository credentials Secret name + displayName: Restapi Service Account Repo Credentials + path: restapi.serviceAccount.repoCredentials + - description: Web Console image name + displayName: Adminui Image Name + path: adminui.image.name + - description: Web Console image tag + displayName: Adminui Image Tag + path: adminui.image.tag + - description: Web Console image pull policy + displayName: Adminui Image Pull Policy + path: adminui.image.pullPolicy + - description: When set to `true` the HTTP port will be exposed in the Web + Console Service + displayName: Adminui Service Expose HTTP + path: adminui.service.exposeHTTP + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "The type used for the service of the UI:\n* Set to LoadBalancer\ + \ to create a load balancer (if supported by the kubernetes cluster)\n\ + \ to allow connect from Internet to the UI. Note that enabling this\ + \ feature will probably incurr in\n some fee that depend on the host\ + \ of the kubernetes cluster (for example this is true for EKS, GKE\n\ + \ and AKS).\n* Set to NodePort to expose admin UI from kubernetes nodes.\n" + displayName: Adminui Service Type + path: adminui.service.type + - description: 'LoadBalancer will get created with the IP specified in + + this field. This feature depends on whether the underlying cloud-provider + supports specifying + + the loadBalancerIP when a load balancer is created. This field will + be ignored if the + + cloud-provider does not support the feature. 
+
+      '
+    displayName: Adminui Service Load Balancer IP
+    path: adminui.service.loadBalancerIP
+  - displayName: Adminui Service Load Balancer Source Ranges
+    path: adminui.service.loadBalancerSourceRanges
+  - description: The HTTPS port used to expose the Service on Kubernetes nodes
+    displayName: Adminui Service Node Port
+    path: adminui.service.nodePort
+  - description: The HTTP port used to expose the Service on Kubernetes nodes
+    displayName: Adminui Service Node Port HTTP
+    path: adminui.service.nodePortHTTP
+  - description: OpenTelemetry Collector Deployment/DaemonSet base name
+    displayName: Collector Name
+    path: collector.name
+  - description: "When set to `true` it enables the creation of a set of OpenTelemetry
+      Collector receivers that will scrape from the SGCluster Pods and allow the
+      observability architecture to scale, and a set of OpenTelemetry Collector
+      exporters that export those metrics to one or more configured targets.\n"
+    displayName: Collector Receivers Enabled
+    path: collector.receivers.enabled
+    x-descriptors:
+      - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+  - description: "When receivers are enabled, indicates the number of OpenTelemetry
+      Collector exporters that export metrics to one or more configured targets.\n"
+    displayName: Collector Receivers Exporters
+    path: collector.receivers.exporters
+  - description: The namespace of the SGCluster
+    displayName: Collector Receivers Deployments SGClusters Namespace
+    path: collector.receivers.deployments.sgClusters.namespace
+  - description: The name of the SGCluster
+    displayName: Collector Receivers Deployments SGClusters Name
+    path: collector.receivers.deployments.sgClusters.name
+  - displayName: Collector Receivers Deployments SGClusters Indexes
+    path: collector.receivers.deployments.sgClusters.indexes
+  - description: Repository credentials Secret name
+    displayName: Collector Service Account Repo Credentials
+    path: collector.serviceAccount.repoCredentials
+  - description: "If set to false, or if monitors is set, automatic binding to Prometheus
+      created using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)
+      will be disabled.\n\nIf disabled the cluster will not be bound to Prometheus
+      automatically and will require manual configuration.\n\nWill be ignored if
+      monitors is set.\n"
+    displayName: Collector Prometheus Operator Allow Discovery
+    path: collector.prometheusOperator.allowDiscovery
+    x-descriptors:
+      - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+  - description: The name of the Prometheus resource that will scrape from the collector
+      Pod pointing by default to the prometheus exporter
+    displayName: Collector Prometheus Operator Monitors Name
+    path: collector.prometheusOperator.monitors.name
+  - description: The namespace of the Prometheus resource that will scrape from the
+      collector Pod pointing by default to the prometheus exporter
+    displayName: Collector Prometheus Operator Monitors Namespace
+    path: collector.prometheusOperator.monitors.namespace
+  - description: The name of the PodMonitor
+    displayName: Collector Prometheus Operator Monitors Metadata Name
+    path: collector.prometheusOperator.monitors.metadata.name
+  - description: The namespace of the PodMonitor. Changing the namespace may require
+      configuring the Prometheus CR properly in order to discover the PodMonitor
+      in such namespace.
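+    # Editor's note: illustrative (commented) SGConfig fragment for the Web Console
+    # Service and collector settings described above; values are hypothetical.
+    #
+    # spec:
+    #   adminui:
+    #     service:
+    #       type: LoadBalancer          # or NodePort to expose the UI from the nodes
+    #       exposeHTTP: false
+    #   collector:
+    #     prometheusOperator:
+    #       allowDiscovery: true        # auto-bind to Prometheus created by the Prometheus Operator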
+ displayName: Collector Prometheus Operator Monitors Metadata Namespace + path: collector.prometheusOperator.monitors.metadata.namespace + - description: Operator Installation Jobs image name + displayName: Jobs Image Name + path: jobs.image.name + - description: Operator Installation Jobs image tag + displayName: Jobs Image Tag + path: jobs.image.tag + - description: Operator Installation Jobs image pull policy + displayName: Jobs Image Pull Policy + path: jobs.image.pullPolicy + - description: Repository credentials Secret name + displayName: Jobs Service Account Repo Credentials + path: jobs.serviceAccount.repoCredentials + - description: When set to `true` the Operator will be deployed. + displayName: Deploy Operator + path: deploy.operator + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: When set to `true` the Web Console / REST API will be deployed. + displayName: Deploy Restapi + path: deploy.restapi + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: When set to `true` the OpenTelemetry Collector will be deployed. + displayName: Deploy Collector + path: deploy.collector + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "If set to `true` the CertificateSigningRequest used to generate\ + \ the certificate used by\n Webhooks will be approved by the Operator\ + \ Installation Job.\n" + displayName: Cert Autoapprove + path: cert.autoapprove + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: When set to `true` the Operator certificate will be created. + displayName: Cert Create For Operator + path: cert.createForOperator + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: When set to `true` the Web Console / REST API certificate + will be created. + displayName: Cert Create For Web Api + path: cert.createForWebApi + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: When set to `true` the OpenTelemetry Collector certificate + will be created. + displayName: Cert Create For Collector + path: cert.createForCollector + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "The Secret name with the Operator Webhooks certificate issued\ + \ by the Kubernetes cluster CA\n of type kubernetes.io/tls. See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets\n" + displayName: Cert Secret Name + path: cert.secretName + - description: 'When set to `true` the Operator certificates will be regenerated + if `createForOperator` is set to `true`, and the certificate is expired + or invalid. + + ' + displayName: Cert Regenerate Cert + path: cert.regenerateCert + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The duration in days of the generated certificate for the + Operator after which it will expire and be regenerated. + + If not specified it will be set to 730 (2 years) by default. + + ' + displayName: Cert Cert Duration + path: cert.certDuration + - description: "The Secret name with the Web Console / REST API certificate\n\ + \ of type kubernetes.io/tls. See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets\n" + displayName: Cert Web Secret Name + path: cert.webSecretName + - description: 'When set to `true` the Web Console / REST API certificates + will be regenerated if `createForWebApi` is set to `true`, and the certificate + is expired or invalid. 
+ + ' + displayName: Cert Regenerate Web Cert + path: cert.regenerateWebCert + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When set to `true` the Web Console / REST API RSA key pair + will be regenerated if `createForWebApi` is set to `true`, and the certificate + is expired or invalid. + + ' + displayName: Cert Regenerate Web Rsa + path: cert.regenerateWebRsa + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The duration in days of the generated certificate for the + Web Console / REST API after which it will expire and be regenerated. + + If not specified it will be set to 730 (2 years) by default. + + ' + displayName: Cert Web Cert Duration + path: cert.webCertDuration + - description: 'The duration in days of the generated RSA key pair for the + Web Console / REST API after which it will expire and be regenerated. + + If not specified it will be set to 730 (2 years) by default. + + ' + displayName: Cert Web Rsa Duration + path: cert.webRsaDuration + - description: "The Secret name with the OpenTelemetry Collector certificate\n\ + \ of type kubernetes.io/tls. See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets\n" + displayName: Cert Collector Secret Name + path: cert.collectorSecretName + - description: 'When set to `true` the OpenTelemetry Collector certificates + will be regenerated if `createForCollector` is set to `true`, and the + certificate is expired or invalid. + + ' + displayName: Cert Regenerate Collector Cert + path: cert.regenerateCollectorCert + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The duration in days of the generated certificate for the + OpenTelemetry Collector after which it will expire and be regenerated. + + If not specified it will be set to 730 (2 years) by default. + + ' + displayName: Cert Collector Cert Duration + path: cert.collectorCertDuration + - description: "When set to `true` then Issuer and Certificate for Operator,\ + \ Web Console / REST API and OpenTelemetry Collector\n Pods will be\ + \ generated\n" + displayName: Cert Cert Manager Auto Configure + path: cert.certManager.autoConfigure + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The requested duration (i.e. lifetime) of the Certificates. + See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io%2fv1 + displayName: Cert Cert Manager Duration + path: cert.certManager.duration + - description: How long before the currently issued certificate’s expiry + cert-manager should renew the certificate. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io%2fv1 + displayName: Cert Cert Manager Renew Before + path: cert.certManager.renewBefore + - description: The private key cryptography standards (PKCS) encoding for + this certificate’s private key to be encoded in. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey + displayName: Cert Cert Manager Encoding + path: cert.certManager.encoding + - description: Size is the key bit size of the corresponding private key + for this certificate. 
See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey + displayName: Cert Cert Manager Size + path: cert.certManager.size + - description: "When set to `true` the admin user is assigned the `cluster-admin`\ + \ ClusterRole by creating\n ClusterRoleBinding.\n" + displayName: Rbac Create + path: rbac.create + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "Specify the authentication mechanism to use. By default\ + \ is `jwt`, see https://stackgres.io/doc/latest/api/rbac#local-secret-mechanism.\n\ + \ If set to `oidc` then see https://stackgres.io/doc/latest/api/rbac/#openid-connect-provider-mechanism.\n" + displayName: Authentication Type + path: authentication.type + - description: 'When `true` will create the secret used to store the admin + user credentials to access the UI. + + ' + displayName: Authentication Create Admin Secret + path: authentication.createAdminSecret + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The admin username that will be created for the Web Console + + + Operator bundle installation can not change the default value of this + field. + + ' + displayName: Authentication User + path: authentication.user + - description: 'The admin password that will be created for the Web Console. + + + If not specified a random password will be generated. + + ' + displayName: Authentication Password + path: authentication.password + - description: The name of the Secret. + displayName: Authentication Secret Ref Name + path: authentication.secretRef.name + - description: Can be one of `required`, `certificate-validation` or `none` + displayName: Authentication Oidc Tls Verification + path: authentication.oidc.tlsVerification + - displayName: Authentication Oidc Auth Server Url + path: authentication.oidc.authServerUrl + - displayName: Authentication Oidc Client Id + path: authentication.oidc.clientId + - displayName: Authentication Oidc Credentials Secret + path: authentication.oidc.credentialsSecret + - displayName: Authentication Oidc Client Id Secret Ref Name + path: authentication.oidc.clientIdSecretRef.name + - displayName: Authentication Oidc Client Id Secret Ref Key + path: authentication.oidc.clientIdSecretRef.key + - displayName: Authentication Oidc Credentials Secret Secret Ref Name + path: authentication.oidc.credentialsSecretSecretRef.name + - displayName: Authentication Oidc Credentials Secret Secret Ref Key + path: authentication.oidc.credentialsSecretSecretRef.key + - description: "If set to false disable automatic bind to Prometheus\n \ + \ created using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator).\n\ + If disabled the cluster will not be binded to Prometheus automatically\ + \ and will require manual\n intervention by the Kubernetes cluster\ + \ administrator.\n" + displayName: Prometheus Allow Autobind + path: prometheus.allowAutobind + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "When set to `true` embed automatically Grafana into the\ + \ Web Console by creating the\n StackGres dashboard and the read-only\ + \ role used to read it from the Web Console \n" + displayName: Grafana Auto Embed + path: grafana.autoEmbed + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "The schema to access Grafana. By default http. 
(used to\ + \ embed manually and\n automatically grafana)\n" + displayName: Grafana Schema + path: grafana.schema + - description: "The service host name to access grafana (used to embed manually\ + \ and\n automatically Grafana). \nThe parameter value should point to\ + \ the grafana service following the \n [DNS reference](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/)\ + \ `svc_name.namespace`\n" + displayName: Grafana Web Host + path: grafana.webHost + - description: The datasource name used to create the StackGres Dashboard + into Grafana + displayName: Grafana Datasource Name + path: grafana.datasourceName + - description: "The username to access Grafana. By default admin. (used\ + \ to embed automatically\n Grafana)\n" + displayName: Grafana User + path: grafana.user + - description: "The password to access Grafana. By default prom-operator\ + \ (the default in for\n kube-prometheus-stack helm chart). (used to\ + \ embed automatically Grafana)\n" + displayName: Grafana Password + path: grafana.password + - description: "The namespace of secret with credentials to access Grafana.\ + \ (used to\n embed automatically Grafana, alternative to use `user`\ + \ and `password`)\n" + displayName: Grafana Secret Namespace + path: grafana.secretNamespace + - description: "The name of secret with credentials to access Grafana. (used\ + \ to embed\n automatically Grafana, alternative to use `user` and `password`)\n" + displayName: Grafana Secret Name + path: grafana.secretName + - description: "The key of secret with username used to access Grafana.\ + \ (used to embed\n automatically Grafana, alternative to use `user`\ + \ and `password`)\n" + displayName: Grafana Secret User Key + path: grafana.secretUserKey + - description: "The key of secret with password used to access Grafana.\ + \ (used to\n embed automatically Grafana, alternative to use `user`\ + \ and `password`)\n" + displayName: Grafana Secret Password Key + path: grafana.secretPasswordKey + - description: "The ConfigMap name with the dashboard JSON in the key `grafana-dashboard.json`\n\ + \ that will be created in Grafana. If not set the default\n" + displayName: Grafana Dashboard Config Map + path: grafana.dashboardConfigMap + - description: "The dashboard id that will be create in Grafana\n (see https://grafana.com/grafana/dashboards).\ + \ By default 9628. 
(used to embed automatically\n Grafana)\n\nManual\ + \ Steps:\n \nCreate grafana dashboard for postgres exporter and copy/paste\ + \ share URL:\n- Grafana > Create > Import > Grafana.com Dashboard 9628\n\ + Copy/paste grafana dashboard URL for postgres exporter:\n- Grafana >\ + \ Dashboard > Manage > Select postgres exporter dashboard > Copy URL\n" + displayName: Grafana Dashboard Id + path: grafana.dashboardId + - description: "The URL of the PostgreSQL dashboard created in Grafana (used\ + \ to embed manually\n Grafana)\n" + displayName: Grafana Url + path: grafana.url + - description: "The Grafana API token to access the PostgreSQL dashboard\ + \ created\n in Grafana (used to embed manually Grafana)\n\nManual Steps:\n\ + \ \nCreate and copy/paste grafana API token:\n- Grafana > Configuration\ + \ > API Keys > Add API key (for viewer) > Copy key value\n" + displayName: Grafana Token + path: grafana.token + - displayName: Extensions Repository Urls + path: extensions.repositoryUrls + - description: "When set to `true` enable the extensions cache.\n\nThis\ + \ feature is in beta and may cause failures, please use with caution\ + \ and report any\n error to https://gitlab.com/ongresinc/stackgres/-/issues/new\n" + displayName: Extensions Cache Enabled + path: extensions.cache.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: An extension pattern used to pre-loaded estensions into the + extensions cache + displayName: Extensions Cache Preloaded Extensions + path: extensions.cache.preloadedExtensions + - description: 'The PersistentVolume size for the extensions cache + + + Only use whole numbers (e.g. not 1e6) and K/Ki/M/Mi/G/Gi as units + + ' + displayName: Extensions Cache Persistent Volume Size + path: extensions.cache.persistentVolume.size + - description: "If defined set storage class\nIf set to \"-\" (equivalent\ + \ to storageClass: \"\" in a PV spec) disables\n dynamic provisioning\n\ + If undefined (the default) or set to null, no storageClass spec is\n\ + \ set, choosing the default provisioner. (gp2 on AWS, standard on\n\ + \ GKE, AWS & OpenStack)\n" + displayName: Extensions Cache Persistent Volume Storage Class + path: extensions.cache.persistentVolume.storageClass + - description: "If set, will use a host path volume with the specified path\ + \ for the extensions cache\n instead of a PersistentVolume\n" + displayName: Extensions Cache Host Path + path: extensions.cache.hostPath + - description: The namespace of the ServiceAccount used by ShardingSphere + operator + displayName: Sharding Sphere Service Account Namespace + path: shardingSphere.serviceAccount.namespace + - description: The name of the ServiceAccount used by ShardingSphere operator + displayName: Sharding Sphere Service Account Name + path: shardingSphere.serviceAccount.name + - description: Set the operator version (used for testing) + displayName: Developer Version + path: developer.version + - description: Set `quarkus.log.level`. See https://quarkus.io/guides/logging#root-logger-configuration + displayName: Developer Log Level + path: developer.logLevel + - description: If set to `true` add extra debug to any script controlled + by the reconciliation cycle of the operator configuration + displayName: Developer Show Debug + path: developer.showDebug + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Set `quarkus.log.console.format` to `%d{yyyy-MM-dd HH:mm:ss,SSS} + %-5p [%c{4.}] (%t) %s%e%n`. 
+      See https://quarkus.io/guides/logging#logging-format
+    displayName: Developer Show Stack Traces
+    path: developer.showStackTraces
+    x-descriptors:
+      - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+  - description: 'The operator will use the JVM version of the images
+
+      '
+    displayName: Developer Use Jvm Images
+    path: developer.useJvmImages
+    x-descriptors:
+      - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+  - description: "Only works with the JVM version and allows connecting on port 8000
+      of the operator Pod with jdb or similar\n"
+    displayName: Developer Enable Jvm Debug
+    path: developer.enableJvmDebug
+    x-descriptors:
+      - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+  - description: "Only works with the JVM version; if `enableJvmDebug` is `true`,
+      suspends the JVM until a debugger session is started\n"
+    displayName: Developer Enable Jvm Debug Suspend
+    path: developer.enableJvmDebugSuspend
+    x-descriptors:
+      - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+  - description: Set the external Operator IP
+    displayName: Developer External Operator Ip
+    path: developer.externalOperatorIp
+  - description: Set the external Operator port
+    displayName: Developer External Operator Port
+    path: developer.externalOperatorPort
+  - description: Set the external REST API IP
+    displayName: Developer External Rest Api Ip
+    path: developer.externalRestApiIp
+  - description: Set the external REST API port
+    displayName: Developer External Rest Api Port
+    path: developer.externalRestApiPort
+  - description: "If set to `true` and `extensions.cache.enabled` is also `true`
+      it will try to download extensions from images (experimental)\n"
+    displayName: Developer Allow Pull Extensions From Image Repository
+    path: developer.allowPullExtensionsFromImageRepository
+    x-descriptors:
+      - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+  - description: 'If set to `true` disables the arbitrary user that is set for OpenShift
+      clusters
+
+      '
+    displayName: Developer Disable Arbitrary User
+    path: developer.disableArbitraryUser
+    x-descriptors:
+      - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+  statusDescriptors:
+  - description: Last time the condition transitioned from one status to another.
+    displayName: Conditions Last Transition Time
+    path: conditions.lastTransitionTime
+  - description: A human readable message indicating details about the transition.
+    displayName: Conditions Message
+    path: conditions.message
+  - description: The reason for the condition's last transition.
+    displayName: Conditions Reason
+    path: conditions.reason
+  - description: Status of the condition, one of True, False, Unknown.
+    displayName: Conditions Status
+    path: conditions.status
+  - description: Type of deployment condition.
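+    # Editor's note: illustrative (commented) SGConfig fragment for the extensions cache
+    # fields described earlier in this file; the size and the preloaded-extension pattern
+    # are hypothetical values, and the feature itself is flagged as beta by the description.
+    #
+    # spec:
+    #   extensions:
+    #     cache:
+    #       enabled: true
+    #       preloadedExtensions:
+    #       - timescaledb.*             # hypothetical pattern
+    #       persistentVolume:
+    #         size: 1Gi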
+ displayName: Conditions Type + path: conditions.type + - description: Latest version of the operator used to check for updates + displayName: Version + path: version + - description: Indicate when the old operator bundle resources has been + removed + displayName: Remove Old Operator Bundle Resources + path: removeOldOperatorBundleResources + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Grafana URL to StackGres dashboards preceded by the dashboard + name and a semicolon `:` + displayName: Grafana Urls + path: grafana.urls + - description: Grafana Token that allow to access dashboards + displayName: Grafana Token + path: grafana.token + - description: Grafana configuration hash + displayName: Grafana Config Hash + path: grafana.configHash + - description: Indicate the version to which existing CRs have been updated + to + displayName: Existing Cr Updated To Version + path: existingCrUpdatedToVersion + version: v1 + - description: Day 2 Operations, including upgrades, restarts, vacuum, repack, + etc + displayName: StackGres Database Operation + kind: SGDbOps + name: sgdbops.stackgres.io + specDescriptors: + - description: 'The name of SGCluster on which the operation will be performed. + + ' + displayName: Target SGCluster + path: sgCluster + - displayName: Scheduling Node Selector + path: scheduling.nodeSelector + - description: Effect indicates the taint effect to match. Empty means match + all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + displayName: Scheduling Tolerations Effect + path: scheduling.tolerations.effect + - description: Key is the taint key that the toleration applies to. Empty + means match all taint keys. If the key is empty, operator must be Exists; + this combination means to match all values and all keys. + displayName: Scheduling Tolerations Key + path: scheduling.tolerations.key + - description: Operator represents a key's relationship to the value. Valid + operators are Exists and Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate all taints of a particular + category. + displayName: Scheduling Tolerations Operator + path: scheduling.tolerations.operator + - description: TolerationSeconds represents the period of time the toleration + (which must be of effect NoExecute, otherwise this field is ignored) + tolerates the taint. By default, it is not set, which means tolerate + the taint forever (do not evict). Zero and negative values will be treated + as 0 (evict immediately) by the system. + displayName: Scheduling Tolerations Toleration Seconds + path: scheduling.tolerations.tolerationSeconds + - description: Value is the taint value the toleration matches to. If the + operator is Exists, the value should be empty, otherwise just a regular + string. + displayName: Scheduling Tolerations Value + path: scheduling.tolerations.value + - description: The label key that the selector applies to. + displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Expressions Key + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
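+    # Editor's note: illustrative (commented) SGDbOps manifest using the `sgCluster` and
+    # `scheduling.tolerations` fields described above. The resource name, taint values and
+    # the `op` field are assumptions for the sake of the example (the operation-specific
+    # fields are not part of the descriptors shown above).
+    #
+    # apiVersion: stackgres.io/v1
+    # kind: SGDbOps
+    # metadata:
+    #   name: restart-demo              # hypothetical
+    # spec:
+    #   sgCluster: demo                 # target SGCluster
+    #   op: restart                     # assumption, see the SGDbOps reference
+    #   scheduling:
+    #     tolerations:
+    #     - key: dedicated
+    #       operator: Equal
+    #       value: database
+    #       effect: NoSchedule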
+ displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Expressions Operator + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.operator + - displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Expressions Values + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Fields Key + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Fields Operator + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.operator + - displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Fields Values + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.values + - description: Weight associated with matching the corresponding nodeSelectorTerm, + in the range 1-100. + displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Weight + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: The label key that the selector applies to. + displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Expressions Key + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Expressions Operator + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.operator + - displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Expressions Values + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Fields Key + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
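+ # Illustrative sketch: the node affinity descriptors above expressed as an
+ # SGDbOps `spec.scheduling` snippet (label keys and values are hypothetical).
+ #
+ #   scheduling:
+ #     nodeAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #         nodeSelectorTerms:
+ #         - matchExpressions:
+ #           - key: kubernetes.io/arch
+ #             operator: In
+ #             values: ["amd64"]
+ #       preferredDuringSchedulingIgnoredDuringExecution:
+ #       - weight: 50                  # weight in the range 1-100
+ #         preference:
+ #           matchExpressions:
+ #           - key: topology.kubernetes.io/zone
+ #             operator: In
+ #             values: ["zone-a"]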
+ displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Fields Operator + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.operator + - displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Fields Values + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.values + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Scheduling Priority Class Name + path: scheduling.priorityClassName + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Label Selector Match Expressions + Key + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Label Selector Match Expressions + Operator + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Label Selector Match Expressions + Values + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Label Selector Match Labels + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Match Label Keys + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Mismatch Label Keys + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespace Selector Match Expressions + Key + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespace Selector Match Expressions + Operator + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespace Selector Match Expressions + Values + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespace Selector Match Labels + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespaces + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Topology Key + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Weight + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Key + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
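+ # Illustrative sketch: a weighted pod affinity term using the preferred pod
+ # affinity descriptors above (the label and topology key are hypothetical).
+ #
+ #   scheduling:
+ #     podAffinity:
+ #       preferredDuringSchedulingIgnoredDuringExecution:
+ #       - weight: 100                 # weight in the range 1-100
+ #         podAffinityTerm:
+ #           labelSelector:
+ #             matchExpressions:
+ #             - key: app
+ #               operator: In
+ #               values: ["my-app"]
+ #           topologyKey: kubernetes.io/hostname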
+ displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Operator + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Values + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Labels + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Match Label Keys + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Mismatch Label Keys + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Key + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Operator + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Values + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Labels + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespaces + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Topology Key + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Key + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Operator + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Values + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Labels + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Match Label Keys + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Mismatch Label Keys + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Key + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Operator + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Values + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Labels + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespaces + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. 
+ displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Topology Key + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Weight + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Key + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Operator + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Values + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Label Selector Match Labels + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Match Label Keys + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Mismatch Label Keys + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Key + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
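+ # Illustrative sketch: a required pod anti-affinity term using the descriptors
+ # above, keeping Pods that share a hypothetical label on different nodes.
+ #
+ #   scheduling:
+ #     podAntiAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #       - labelSelector:
+ #           matchLabels:
+ #             app: my-app             # hypothetical label
+ #         topologyKey: kubernetes.io/hostname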
+ displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Operator + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Values + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Labels + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespaces + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Topology Key + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: 'The kind of operation that will be performed on the SGCluster. + Available operations are: + + + * `benchmark`: run a benchmark on the specified SGCluster and report + the results in the status. + + * `vacuum`: perform a [vacuum](https://www.postgresql.org/docs/current/sql-vacuum.html) + operation on the specified SGCluster. + + * `repack`: run [`pg_repack`](https://github.com/reorg/pg_repack) command + on the specified SGCluster. + + * `majorVersionUpgrade`: perform a major version upgrade of PostgreSQL + using [`pg_upgrade`](https://www.postgresql.org/docs/current/pgupgrade.html) + command. + + * `restart`: perform a restart of the cluster. + + * `minorVersionUpgrade`: perform a minor version upgrade of PostgreSQL. + + * `securityUpgrade`: perform a security upgrade of the cluster. + + ' + displayName: Op + path: op + - description: 'An ISO 8601 date, that holds UTC scheduled date of the operation + execution. + + + If not specified or if the date it''s in the past, it will be interpreted + ASAP. + + ' + displayName: Run At + path: runAt + - description: 'An ISO 8601 duration in the format `PnDTnHnMn.nS`, that + specifies a timeout after which the operation execution will be canceled. + + + If the operation can not be performed due to timeout expiration, the + condition `Failed` will have a status of `True` and the reason will + be `OperationTimedOut`. + + + If not specified the operation will never fail for timeout expiration. + + ' + displayName: Timeout + path: timeout + - description: 'The maximum number of retries the operation is allowed to + do after a failure. + + + A value of `0` (zero) means no retries are made. Defaults to: `0`. + + ' + displayName: Max Retries + path: maxRetries + - description: 'The type of benchmark that will be performed on the SGCluster. 
+ Available benchmarks are: + + + * `pgbench`: run [pgbench](https://www.postgresql.org/docs/current/pgbench.html) + on the specified SGCluster and report the results in the status. + + * `sampling`: samples real queries and store them in the SGDbOps status + in order to be used by a `pgbench` benchmark using `replay` mode. + + ' + displayName: Benchmark Type + path: benchmark.type + - description: 'When specified will indicate the database where the benchmark + will run upon. + + + If not specified a target database with a random name will be created + and removed after the benchmark completes. + + ' + displayName: Benchmark Database + path: benchmark.database + - description: 'The Secret name where the username is stored. + + ' + displayName: Benchmark Credentials Username Name + path: benchmark.credentials.username.name + - description: 'The Secret key where the username is stored. + + ' + displayName: Benchmark Credentials Username Key + path: benchmark.credentials.username.key + - description: 'The Secret name where the password is stored. + + ' + displayName: Benchmark Credentials Password Name + path: benchmark.credentials.password.name + - description: 'The Secret key where the password is stored. + + ' + displayName: Benchmark Credentials Password Key + path: benchmark.credentials.password.key + - description: 'The target database to be sampled. By default `postgres`. + + + The benchmark database will be used to store the sampled queries but + user must specify a target database to be sampled in the `sampling` + section. + + ' + displayName: Benchmark Sampling Target Database + path: benchmark.sampling.targetDatabase + - description: An ISO 8601 duration in the format `PnDTnHnMn.nS`, that specifies + how long the to wait before selecting top queries in order to collect + enough stats. + displayName: Benchmark Sampling Top Queries Collect Duration + path: benchmark.sampling.topQueriesCollectDuration + - description: An ISO 8601 duration in the format `PnDTnHnMn.nS`, that specifies + how long will last the sampling of real queries that will be replayed + later. + displayName: Benchmark Sampling Sampling Duration + path: benchmark.sampling.samplingDuration + - description: 'The mode used to select the top queries used for sampling: + + + * `time`: The top queries will be selected among the most slow queries. + + * `calls`: The top queries will be selected among the most called queries. + + * `custom`: The `customTopQueriesQuery` will be used to select top queries. + + ' + displayName: Benchmark Sampling Mode + path: benchmark.sampling.mode + - description: Regular expression for filtering representative statements + when selecting top queries. Will be ignored if `mode` is set to `custom`. + By default is `^ *(with|select) `. See https://www.postgresql.org/docs/current/functions-matching.html#FUNCTIONS-POSIX-REGEXP + displayName: Benchmark Sampling Top Queries Filter + path: benchmark.sampling.topQueriesFilter + - description: Percentile of queries to consider as part of the top queries. + Will be ignored if `mode` is set to `custom`. By default `95`. + displayName: Benchmark Sampling Top Queries Percentile + path: benchmark.sampling.topQueriesPercentile + - description: Minimum number of queries to consider as part of the top + queries. By default `5`. + displayName: Benchmark Sampling Top Queries Min + path: benchmark.sampling.topQueriesMin + - description: 'The query used to select top queries. Will be ignored if + `mode` is not set to `custom`. 
+ + + The query must return at most 2 columns: + + + * First column returned by the query must be a column holding the query + identifier, also available in pg_stat_activity (column `query_id`) and + pg_stat_statements (column `queryid`). + + * Second column is optional and, if returned, must hold a json object + containing only text keys and values stat will be used to generate the + stats. + + + See also: + + + * https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW + + * https://www.postgresql.org/docs/current/pgstatstatements.html#PGSTATSTATEMENTS-PG-STAT-STATEMENTS + + ' + displayName: Benchmark Sampling Custom Top Queries Query + path: benchmark.sampling.customTopQueriesQuery + - description: Number of sampled queries to include in the result. By default + `10`. + displayName: Benchmark Sampling Queries + path: benchmark.sampling.queries + - description: When `true` omit to include the top queries stats in the + SGDbOps status. By default `false`. + displayName: Benchmark Sampling Omit Top Queries In Status + path: benchmark.sampling.omitTopQueriesInStatus + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Minimum number of microseconds the sampler will wait between + each sample is taken. By default `10000` (10 milliseconds). + displayName: Benchmark Sampling Sampling Min Interval + path: benchmark.sampling.samplingMinInterval + - description: 'The pgbench benchmark type: + + + * `tpcb-like`: The benchmark is inspired by the [TPC-B benchmark](https://www.tpc.org/TPC_Documents_Latest_Versions/TPC-B_v2.0.0.pdf). + It is the default mode when `connectionType` is set to `primary-service`. + + * `select-only`: The `tpcb-like` but only using SELECTs commands. It + is the default mode when `connectionType` is set to `replicas-service`. + + * `custom`: will use the scripts in the `custom` section to initialize + and and run commands for the benchmark. + + * `replay`: will replay the sampled queries of a sampling benchmark + SGDbOps. If the `custom` section is specified it will be used instead. + Queries can be referenced setting `custom.scripts.replay` to the index + of the query in the sampling benchmark SGDbOps''s status (index start + from 0). + + + See also https://www.postgresql.org/docs/current/pgbench.html#TRANSACTIONS-AND-SCRIPTS + + ' + displayName: Benchmark Pgbench Mode + path: benchmark.pgbench.mode + - description: 'Size of the database to generate. This size is specified + either in Mebibytes, Gibibytes or Tebibytes (multiples of 2^20, 2^30 + or 2^40, respectively). + + ' + displayName: Benchmark Pgbench Database Size + path: benchmark.pgbench.databaseSize + - description: 'An ISO 8601 duration in the format `PnDTnHnMn.nS`, that + specifies how long the benchmark will run. + + ' + displayName: Benchmark Pgbench Duration + path: benchmark.pgbench.duration + - description: '**Deprecated** this field is ignored, use `queryMode` instead. + + + Use extended query protocol with prepared statements. Defaults to: `false`. + + ' + displayName: Benchmark Pgbench Use Prepared Statements + path: benchmark.pgbench.usePreparedStatements + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Protocol to use for submitting queries to the server: + + + * `simple`: use simple query protocol. + + * `extended`: use extended query protocol. + + * `prepared`: use extended query protocol with prepared statements. 
+ + + In the prepared mode, pgbench reuses the parse analysis result starting + from the second query iteration, so pgbench runs faster than in other + modes. + + + The default is `simple` query protocol. See also https://www.postgresql.org/docs/current/protocol.html + + ' + displayName: Benchmark Pgbench Query Mode + path: benchmark.pgbench.queryMode + - description: 'Number of clients simulated, that is, number of concurrent + database sessions. Defaults to: `1`. + + ' + displayName: Benchmark Pgbench Concurrent Clients + path: benchmark.pgbench.concurrentClients + - description: 'Number of worker threads within pgbench. Using more than + one thread can be helpful on multi-CPU machines. Clients are distributed + as evenly as possible among available threads. Default is `1`. + + ' + displayName: Benchmark Pgbench Threads + path: benchmark.pgbench.threads + - description: 'Create foreign key constraints between the standard tables. + (This option only take effect if `custom.initiailization` is not specified). + + ' + displayName: Benchmark Pgbench Foreign Keys + path: benchmark.pgbench.foreignKeys + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Create all tables as unlogged tables, rather than permanent + tables. (This option only take effect if `custom.initiailization` is + not specified). + + ' + displayName: Benchmark Pgbench Unlogged Tables + path: benchmark.pgbench.unloggedTables + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Create a partitioned pgbench_accounts table with the specified + method. Expected values are `range` or `hash`. This option requires + that partitions is set to non-zero. If unspecified, default is `range`. + (This option only take effect if `custom.initiailization` is not specified). + + ' + displayName: Benchmark Pgbench Partition Method + path: benchmark.pgbench.partitionMethod + - description: 'Create a partitioned pgbench_accounts table with the specified + number of partitions of nearly equal size for the scaled number of accounts. + Default is 0, meaning no partitioning. (This option only take effect + if `custom.initiailization` is not specified). + + ' + displayName: Benchmark Pgbench Partitions + path: benchmark.pgbench.partitions + - description: "Perform just a selected set of the normal initialization\ + \ steps. init_steps specifies the initialization steps to be performed,\ + \ using one character per step. Each step is invoked in the specified\ + \ order. The default is dtgvp. The available steps are:\n\n* `d` (Drop):\ + \ Drop any existing pgbench tables.\n* `t` (create Tables): Create the\ + \ tables used by the standard pgbench scenario, namely pgbench_accounts,\ + \ pgbench_branches, pgbench_history, and pgbench_tellers.\n* `g` or\ + \ `G` (Generate data, client-side or server-side): Generate data and\ + \ load it into the standard tables, replacing any data already present.\n\ + \ With `g` (client-side data generation), data is generated in pgbench\ + \ client and then sent to the server. This uses the client/server bandwidth\ + \ extensively through a COPY. pgbench uses the FREEZE option with version\ + \ 14 or later of PostgreSQL to speed up subsequent VACUUM, unless partitions\ + \ are enabled. 
Using g causes logging to print one message every 100,000\ + \ rows while generating data for the pgbench_accounts table.\n With\ + \ `G` (server-side data generation), only small queries are sent from\ + \ the pgbench client and then data is actually generated in the server.\ + \ No significant bandwidth is required for this variant, but the server\ + \ will do more work. Using G causes logging not to print any progress\ + \ message while generating data.\n The default initialization behavior\ + \ uses client-side data generation (equivalent to g).\n* `v` (Vacuum):\ + \ Invoke VACUUM on the standard tables.\n* `p` (create Primary keys):\ + \ Create primary key indexes on the standard tables.\n* `f` (create\ + \ Foreign keys): Create foreign key constraints between the standard\ + \ tables. (Note that this step is not performed by default.)\n" + displayName: Benchmark Pgbench Init Steps + path: benchmark.pgbench.initSteps + - description: 'Create the pgbench_accounts, pgbench_tellers and pgbench_branches + tables with the given fillfactor. Default is 100. + + ' + displayName: Benchmark Pgbench Fillfactor + path: benchmark.pgbench.fillfactor + - description: 'Perform no vacuuming during initialization. (This option + suppresses the `v` initialization step, even if it was specified in + `initSteps`.) + + ' + displayName: Benchmark Pgbench No Vacuum + path: benchmark.pgbench.noVacuum + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: benchmark SGDbOps of type sampling that will be used to replay + sampled queries. + displayName: Benchmark Pgbench Sampling SG Db Ops + path: benchmark.pgbench.samplingSGDbOps + - description: 'Raw SQL script to execute. This field is mutually exclusive + with `scriptFrom` field. + + ' + displayName: Benchmark Pgbench Custom Initialization Script + path: benchmark.pgbench.custom.initialization.script + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Benchmark Pgbench Custom Initialization Script From Secret + Key Ref Name + path: benchmark.pgbench.custom.initialization.scriptFrom.secretKeyRef.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Benchmark Pgbench Custom Initialization Script From Secret + Key Ref Key + path: benchmark.pgbench.custom.initialization.scriptFrom.secretKeyRef.key + - description: 'The name of the ConfigMap that contains the SQL script to + execute. + + ' + displayName: Benchmark Pgbench Custom Initialization Script From Config + Map Key Ref Name + path: benchmark.pgbench.custom.initialization.scriptFrom.configMapKeyRef.name + - description: 'The key name within the ConfigMap that contains the SQL + script to execute. + + ' + displayName: Benchmark Pgbench Custom Initialization Script From Config + Map Key Ref Key + path: benchmark.pgbench.custom.initialization.scriptFrom.configMapKeyRef.key + - description: 'Raw SQL script to execute. This field is mutually exclusive + with `scriptFrom` field. + + ' + displayName: Benchmark Pgbench Custom Scripts Script + path: benchmark.pgbench.custom.scripts.script + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Benchmark Pgbench Custom Scripts Script From Secret Key Ref + Name + path: benchmark.pgbench.custom.scripts.scriptFrom.secretKeyRef.name + - description: The key of the secret to select from. 
Must be a valid secret + key. + displayName: Benchmark Pgbench Custom Scripts Script From Secret Key Ref + Key + path: benchmark.pgbench.custom.scripts.scriptFrom.secretKeyRef.key + - description: 'The name of the ConfigMap that contains the SQL script to + execute. + + ' + displayName: Benchmark Pgbench Custom Scripts Script From Config Map Key + Ref Name + path: benchmark.pgbench.custom.scripts.scriptFrom.configMapKeyRef.name + - description: 'The key name within the ConfigMap that contains the SQL + script to execute. + + ' + displayName: Benchmark Pgbench Custom Scripts Script From Config Map Key + Ref Key + path: benchmark.pgbench.custom.scripts.scriptFrom.configMapKeyRef.key + - description: 'The name of the builtin script to use. See https://www.postgresql.org/docs/current/pgbench.html#PGBENCH-OPTION-BUILTIN + + + When specified fields `replay`, `script` and `scriptFrom` must not be + set. + + ' + displayName: Benchmark Pgbench Custom Scripts Builtin + path: benchmark.pgbench.custom.scripts.builtin + - description: 'The index of the query in the sampling benchmark SGDbOps''s + status (index start from 0). + + + When specified fields `builtin`, `script` and `scriptFrom` must not + be set. + + ' + displayName: Benchmark Pgbench Custom Scripts Replay + path: benchmark.pgbench.custom.scripts.replay + - description: The weight of this custom SQL script. + displayName: Benchmark Pgbench Custom Scripts Weight + path: benchmark.pgbench.custom.scripts.weight + - description: 'Specify the service where the benchmark will connect to: + + + * `primary-service`: Connect to the primary service + + * `replicas-service`: Connect to the replicas service + + ' + displayName: Benchmark Connection Type + path: benchmark.connectionType + - description: "If true selects \"full\" vacuum, which can reclaim more\ + \ space, but takes much longer and exclusively locks the table.\nThis\ + \ method also requires extra disk space, since it writes a new copy\ + \ of the table and doesn't release the old copy\n until the operation\ + \ is complete. Usually this should only be used when a significant amount\ + \ of space needs to be\n reclaimed from within the table. Defaults\ + \ to: `false`.\n" + displayName: Vacuum Full + path: vacuum.full + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "If true selects aggressive \"freezing\" of tuples. Specifying\ + \ FREEZE is equivalent to performing VACUUM with the\n vacuum_freeze_min_age\ + \ and vacuum_freeze_table_age parameters set to zero. Aggressive freezing\ + \ is always performed\n when the table is rewritten, so this option\ + \ is redundant when FULL is specified. Defaults to: `false`.\n" + displayName: Vacuum Freeze + path: vacuum.freeze + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If true, updates statistics used by the planner to determine + the most efficient way to execute a query. Defaults to: `true`. + + ' + displayName: Vacuum Analyze + path: vacuum.analyze + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "Normally, VACUUM will skip pages based on the visibility\ + \ map. Pages where all tuples are known to be frozen can always be\n\ + \ skipped, and those where all tuples are known to be visible to all\ + \ transactions may be skipped except when performing an\n aggressive\ + \ vacuum. 
Furthermore, except when performing an aggressive vacuum,\ + \ some pages may be skipped in order to avoid\n waiting for other sessions\ + \ to finish using them. This option disables all page-skipping behavior,\ + \ and is intended to be\n used only when the contents of the visibility\ + \ map are suspect, which should happen only if there is a hardware or\n\ + \ software issue causing database corruption. Defaults to: `false`.\n" + displayName: Vacuum Disable Page Skipping + path: vacuum.disablePageSkipping + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: the name of the database + displayName: Vacuum Databases Name + path: vacuum.databases.name + - description: "If true selects \"full\" vacuum, which can reclaim more\ + \ space, but takes much longer and exclusively locks the table.\nThis\ + \ method also requires extra disk space, since it writes a new copy\ + \ of the table and doesn't release the old copy\n until the operation\ + \ is complete. Usually this should only be used when a significant amount\ + \ of space needs to be\n reclaimed from within the table. Defaults\ + \ to: `false`.\n" + displayName: Vacuum Databases Full + path: vacuum.databases.full + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "If true selects aggressive \"freezing\" of tuples. Specifying\ + \ FREEZE is equivalent to performing VACUUM with the\n vacuum_freeze_min_age\ + \ and vacuum_freeze_table_age parameters set to zero. Aggressive freezing\ + \ is always performed\n when the table is rewritten, so this option\ + \ is redundant when FULL is specified. Defaults to: `false`.\n" + displayName: Vacuum Databases Freeze + path: vacuum.databases.freeze + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If true, updates statistics used by the planner to determine + the most efficient way to execute a query. Defaults to: `true`. + + ' + displayName: Vacuum Databases Analyze + path: vacuum.databases.analyze + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "Normally, VACUUM will skip pages based on the visibility\ + \ map. Pages where all tuples are known to be frozen can always be\n\ + \ skipped, and those where all tuples are known to be visible to all\ + \ transactions may be skipped except when performing an\n aggressive\ + \ vacuum. Furthermore, except when performing an aggressive vacuum,\ + \ some pages may be skipped in order to avoid\n waiting for other sessions\ + \ to finish using them. This option disables all page-skipping behavior,\ + \ and is intended to be\n used only when the contents of the visibility\ + \ map are suspect, which should happen only if there is a hardware or\n\ + \ software issue causing database corruption. Defaults to: `false`.\n" + displayName: Vacuum Databases Disable Page Skipping + path: vacuum.databases.disablePageSkipping + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If true do vacuum full instead of cluster. Defaults to: + `false`. + + ' + displayName: Repack No Order + path: repack.noOrder + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If specified, an ISO 8601 duration format `PnDTnHnMn.nS` + to set a timeout to cancel other backends on conflict. + + ' + displayName: Repack Wait Timeout + path: repack.waitTimeout + - description: 'If true don''t kill other backends when timed out. Defaults + to: `false`. 
+ + ' + displayName: Repack No Kill Backend + path: repack.noKillBackend + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If true don''t analyze at end. Defaults to: `false`. + + ' + displayName: Repack No Analyze + path: repack.noAnalyze + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If true don''t repack tables which belong to specific extension. + Defaults to: `false`. + + ' + displayName: Repack Exclude Extension + path: repack.excludeExtension + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: the name of the database + displayName: Repack Databases Name + path: repack.databases.name + - description: 'If true do vacuum full instead of cluster. Defaults to: + `false`. + + ' + displayName: Repack Databases No Order + path: repack.databases.noOrder + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If specified, an ISO 8601 duration format `PnDTnHnMn.nS` + to set a timeout to cancel other backends on conflict. + + ' + displayName: Repack Databases Wait Timeout + path: repack.databases.waitTimeout + - description: 'If true don''t kill other backends when timed out. Defaults + to: `false`. + + ' + displayName: Repack Databases No Kill Backend + path: repack.databases.noKillBackend + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If true don''t analyze at end. Defaults to: `false`. + + ' + displayName: Repack Databases No Analyze + path: repack.databases.noAnalyze + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If true don''t repack tables which belong to specific extension. + Defaults to: `false`. + + ' + displayName: Repack Databases Exclude Extension + path: repack.databases.excludeExtension + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The target postgres version that must have the same major + version of the target SGCluster. + + ' + displayName: Major Version Upgrade Postgres Version + path: majorVersionUpgrade.postgresVersion + - description: The name of the extension to deploy. + displayName: Major Version Upgrade Postgres Extensions Name + path: majorVersionUpgrade.postgresExtensions.name + - description: The id of the publisher of the extension to deploy. If not + specified `com.ongres` will be used by default. + displayName: Major Version Upgrade Postgres Extensions Publisher + path: majorVersionUpgrade.postgresExtensions.publisher + - description: The version of the extension to deploy. If not specified + version of `stable` channel will be used by default and if only a version + is available that one will be used. + displayName: Major Version Upgrade Postgres Extensions Version + path: majorVersionUpgrade.postgresExtensions.version + - description: 'The repository base URL from where to obtain the extension + to deploy. + + + **This section is filled by the operator.** + + ' + displayName: Major Version Upgrade Postgres Extensions Repository + path: majorVersionUpgrade.postgresExtensions.repository + - description: 'The postgres config that must have the same major version + of the target postgres version. + + ' + displayName: Major Version Upgrade SGPostgresConfig + path: majorVersionUpgrade.sgPostgresConfig + - description: "The path were the backup is stored. 
If not set, this field\
+ \ is filled in by the operator.\n\nWhen provided it will indicate where\
+ \ the backups and WAL files will be stored.\n\nThe path should be different\
+ \ from the current `.spec.configurations.backups[].path` value for the\
+ \ target `SGCluster`\n in order to avoid mixing WAL files of two distinct\
+ \ major versions of postgres.\n"
+ displayName: Major Version Upgrade Backup Path
+ path: majorVersionUpgrade.backupPath
+ - description: 'If true use hard links instead of copying files to the new
+ cluster. This option is mutually exclusive with `clone`. Defaults to:
+ `false`.
+
+ '
+ displayName: Major Version Upgrade Link
+ path: majorVersionUpgrade.link
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: "If true use efficient file cloning (also known as \"reflinks\"\
+ \ on some systems) instead of copying files to the new cluster.\nThis\
+ \ can result in near-instantaneous copying of the data files, giving\
+ \ the speed advantages of `link` while leaving the old\n cluster untouched.\
+ \ This option is mutually exclusive with `link`. Defaults to: `false`.\n\
+ \nFile cloning is only supported on some operating systems and file\
+ \ systems. If it is selected but not supported, the pg_upgrade\n run\
+ \ will error. At present, it is supported on Linux (kernel 4.5 or later)\
+ \ with Btrfs and XFS (on file systems created with\n reflink support),\
+ \ and on macOS with APFS.\n"
+ displayName: Major Version Upgrade Clone
+ path: majorVersionUpgrade.clone
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'If true does some checks to see if the cluster can perform
+ a major version upgrade without changing any data. Defaults to: `false`.
+
+ '
+ displayName: Major Version Upgrade Check
+ path: majorVersionUpgrade.check
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: The name of the extension to install.
+ displayName: Major Version Upgrade To Install Postgres Extensions Name
+ path: majorVersionUpgrade.toInstallPostgresExtensions.name
+ - description: The id of the publisher of the extension to install.
+ displayName: Major Version Upgrade To Install Postgres Extensions Publisher
+ path: majorVersionUpgrade.toInstallPostgresExtensions.publisher
+ - description: The version of the extension to install.
+ displayName: Major Version Upgrade To Install Postgres Extensions Version
+ path: majorVersionUpgrade.toInstallPostgresExtensions.version
+ - description: The repository base URL from which the extension will be
+ installed.
+ displayName: Major Version Upgrade To Install Postgres Extensions Repository
+ path: majorVersionUpgrade.toInstallPostgresExtensions.repository
+ - description: The postgres major version of the extension to install.
+ displayName: Major Version Upgrade To Install Postgres Extensions Postgres
+ Version
+ path: majorVersionUpgrade.toInstallPostgresExtensions.postgresVersion
+ - description: The build version of the extension to install.
+ displayName: Major Version Upgrade To Install Postgres Extensions Build
+ path: majorVersionUpgrade.toInstallPostgresExtensions.build
+ - description: The extra mount of the installed extension.
+ displayName: Major Version Upgrade To Install Postgres Extensions Extra
+ Mounts
+ path: majorVersionUpgrade.toInstallPostgresExtensions.extraMounts
+ - description: "The method used to perform the restart operation. 
Available\
+ \ methods are:\n\n* `InPlace`: the in-place method does not require\
+ \ more resources than those that are available.\n In case only one instance\
+ \ of the StackGres cluster is present this means the service disruption\
+ \ will\n last longer, so we encourage using the reduced impact restart,\
+ \ especially for a production environment.\n* `ReducedImpact`: this\
+ \ procedure is the same as the in-place method but requires additional\n\
+ \ resources in order to spawn a new updated replica that will be removed\
+ \ when the procedure completes.\n"
+ displayName: Restart Method
+ path: restart.method
+ - description: "By default all Pods are restarted. Setting this option to\
+ \ `true` allows restarting only those Pods which\n are in a pending restart\
+ \ state as detected by the operation. Defaults to: `false`.\n"
+ displayName: Restart Only Pending Restart
+ path: restart.onlyPendingRestart
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'The target postgres version that must have the same major
+ version of the target SGCluster.
+
+ '
+ displayName: Minor Version Upgrade Postgres Version
+ path: minorVersionUpgrade.postgresVersion
+ - description: "The method used to perform the minor version upgrade operation.\
+ \ Available methods are:\n\n* `InPlace`: the in-place method does not\
+ \ require more resources than those that are available.\n In case only\
+ \ one instance of the StackGres cluster is present this means the service\
+ \ disruption will\n last longer, so we encourage using the reduced impact\
+ \ restart, especially for a production environment.\n* `ReducedImpact`:\
+ \ this procedure is the same as the in-place method but requires additional\n\
+ \ resources in order to spawn a new updated replica that will be removed\
+ \ when the procedure completes.\n"
+ displayName: Minor Version Upgrade Method
+ path: minorVersionUpgrade.method
+ - description: "The method used to perform the security upgrade operation.\
+ \ Available methods are:\n\n* `InPlace`: the in-place method does not\
+ \ require more resources than those that are available.\n In case only\
+ \ one instance of the StackGres cluster is present this means the service\
+ \ disruption will\n last longer, so we encourage using the reduced impact\
+ \ restart, especially for a production environment.\n* `ReducedImpact`:\
+ \ this procedure is the same as the in-place method but requires additional\n\
+ \ resources in order to spawn a new updated replica that will be removed\
+ \ when the procedure completes.\n"
+ displayName: Security Upgrade Method
+ path: securityUpgrade.method
+ statusDescriptors:
+ - displayName: Conditions
+ path: conditions
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes.conditions
+ - description: Last time the condition transitioned from one status to another.
+ displayName: Conditions Last Transition Time
+ path: conditions.lastTransitionTime
+ - description: A human-readable message indicating details about the transition.
+ displayName: Conditions Message
+ path: conditions.message
+ - description: The reason for the condition's last transition.
+ displayName: Conditions Reason
+ path: conditions.reason
+ - description: Status of the condition, one of `True`, `False` or `Unknown`.
+ displayName: Conditions Status
+ path: conditions.status
+ - description: Type of deployment condition. 
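+ # Illustrative sketches (not part of the generated CSV) tying together the op,
+ # runAt, timeout, maxRetries, benchmark and restart descriptors above. Names,
+ # dates, sizes and durations are hypothetical.
+ #
+ # A scheduled restart that only touches Pods pending a restart:
+ #
+ # apiVersion: stackgres.io/v1
+ # kind: SGDbOps
+ # metadata:
+ #   name: restart-my-cluster
+ # spec:
+ #   sgCluster: my-cluster
+ #   op: restart
+ #   runAt: "2025-01-01T03:00:00Z"     # ISO 8601 UTC date
+ #   timeout: PT30M                    # ISO 8601 duration
+ #   maxRetries: 1
+ #   restart:
+ #     method: ReducedImpact
+ #     onlyPendingRestart: true
+ #
+ # A pgbench benchmark run against the replicas service:
+ #
+ # apiVersion: stackgres.io/v1
+ # kind: SGDbOps
+ # metadata:
+ #   name: pgbench-my-cluster
+ # spec:
+ #   sgCluster: my-cluster
+ #   op: benchmark
+ #   benchmark:
+ #     type: pgbench
+ #     connectionType: replicas-service
+ #     pgbench:
+ #       databaseSize: 1Gi             # hypothetical size (Mebibytes, Gibibytes or Tebibytes)
+ #       duration: PT10M               # ISO 8601 duration
+ #       concurrentClients: 8
+ #       threads: 2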
+ displayName: Conditions Type + path: conditions.type + - description: 'The number of retries performed by the operation + + ' + displayName: Op Retries + path: opRetries + - description: 'The ISO 8601 timestamp of when the operation started running + + ' + displayName: Op Started + path: opStarted + - description: The query id of the representative statement calculated by + Postgres + displayName: Benchmark Sampling Top Queries Id + path: benchmark.sampling.topQueries.id + - displayName: Benchmark Sampling Top Queries Stats + path: benchmark.sampling.topQueries.stats + - description: The query id of the representative statement calculated by + Postgres + displayName: Benchmark Sampling Queries Id + path: benchmark.sampling.queries.id + - description: A sampled SQL query + displayName: Benchmark Sampling Queries Query + path: benchmark.sampling.queries.query + - description: The sampled query timestamp + displayName: Benchmark Sampling Queries Timestamp + path: benchmark.sampling.queries.timestamp + - description: 'The number of transactions processed. + + ' + displayName: Benchmark Pgbench Transactions Processed + path: benchmark.pgbench.transactionsProcessed + - description: 'The latency measure unit + + ' + displayName: Benchmark Pgbench Latency Average Unit + path: benchmark.pgbench.latency.average.unit + - description: 'The latency measure unit + + ' + displayName: Benchmark Pgbench Latency Standard Deviation Unit + path: benchmark.pgbench.latency.standardDeviation.unit + - description: 'Transactions Per Second (tps) measure unit + + ' + displayName: Benchmark Pgbench Transactions Per Second Including Connections + Establishing Unit + path: benchmark.pgbench.transactionsPerSecond.includingConnectionsEstablishing.unit + - description: 'Transactions Per Second (tps) measure unit + + ' + displayName: Benchmark Pgbench Transactions Per Second Excluding Connections + Establishing Unit + path: benchmark.pgbench.transactionsPerSecond.excludingConnectionsEstablishing.unit + - description: The Transactions Per Second (tps) measures unit + displayName: Benchmark Pgbench Transactions Per Second Over Time Values + Unit + path: benchmark.pgbench.transactionsPerSecond.overTime.valuesUnit + - description: The interval duration measure unit + displayName: Benchmark Pgbench Transactions Per Second Over Time Interval + Duration Unit + path: benchmark.pgbench.transactionsPerSecond.overTime.intervalDurationUnit + - description: The script index (`0` if no custom scripts have been defined) + displayName: Benchmark Pgbench Statements Script + path: benchmark.pgbench.statements.script + - description: The command + displayName: Benchmark Pgbench Statements Command + path: benchmark.pgbench.statements.command + - description: The average latency measure unit + displayName: Benchmark Pgbench Statements Unit + path: benchmark.pgbench.statements.unit + - description: Compressed and base 64 encoded HdrHistogram + displayName: Benchmark Pgbench Hdr Histogram + path: benchmark.pgbench.hdrHistogram + - description: 'The postgres version currently used by the primary instance + + ' + displayName: Major Version Upgrade Source Postgres Version + path: majorVersionUpgrade.sourcePostgresVersion + - description: 'The postgres version that the cluster will be upgraded to + + ' + displayName: Major Version Upgrade Target Postgres Version + path: majorVersionUpgrade.targetPostgresVersion + - description: 'The primary instance when the operation started + + ' + displayName: Major Version Upgrade Primary Instance + path: 
majorVersionUpgrade.primaryInstance + - displayName: Major Version Upgrade Initial Instances + path: majorVersionUpgrade.initialInstances + - displayName: Major Version Upgrade Pending To Restart Instances + path: majorVersionUpgrade.pendingToRestartInstances + - displayName: Major Version Upgrade Restarted Instances + path: majorVersionUpgrade.restartedInstances + - description: 'The phase the operation is or was executing) + + ' + displayName: Major Version Upgrade Phase + path: majorVersionUpgrade.phase + - description: 'A failure message (when available) + + ' + displayName: Major Version Upgrade Failure + path: majorVersionUpgrade.failure + - description: 'The primary instance when the operation started + + ' + displayName: Restart Primary Instance + path: restart.primaryInstance + - displayName: Restart Initial Instances + path: restart.initialInstances + - displayName: Restart Pending To Restart Instances + path: restart.pendingToRestartInstances + - displayName: Restart Restarted Instances + path: restart.restartedInstances + - description: 'An ISO 8601 date indicating if and when the switchover initiated + + ' + displayName: Restart Switchover Initiated + path: restart.switchoverInitiated + - description: 'An ISO 8601 date indicating if and when the switchover finalized + + ' + displayName: Restart Switchover Finalized + path: restart.switchoverFinalized + - description: 'A failure message (when available) + + ' + displayName: Restart Failure + path: restart.failure + - description: 'The postgres version currently used by the primary instance + + ' + displayName: Minor Version Upgrade Source Postgres Version + path: minorVersionUpgrade.sourcePostgresVersion + - description: 'The postgres version that the cluster will be upgraded (or + downgraded) to + + ' + displayName: Minor Version Upgrade Target Postgres Version + path: minorVersionUpgrade.targetPostgresVersion + - description: 'The primary instance when the operation started + + ' + displayName: Minor Version Upgrade Primary Instance + path: minorVersionUpgrade.primaryInstance + - displayName: Minor Version Upgrade Initial Instances + path: minorVersionUpgrade.initialInstances + - displayName: Minor Version Upgrade Pending To Restart Instances + path: minorVersionUpgrade.pendingToRestartInstances + - displayName: Minor Version Upgrade Restarted Instances + path: minorVersionUpgrade.restartedInstances + - description: 'An ISO 8601 date indicating if and when the switchover initiated + + ' + displayName: Minor Version Upgrade Switchover Initiated + path: minorVersionUpgrade.switchoverInitiated + - description: 'An ISO 8601 date indicating if and when the switchover finalized + + ' + displayName: Minor Version Upgrade Switchover Finalized + path: minorVersionUpgrade.switchoverFinalized + - description: 'A failure message (when available) + + ' + displayName: Minor Version Upgrade Failure + path: minorVersionUpgrade.failure + - description: 'The primary instance when the operation started + + ' + displayName: Security Upgrade Primary Instance + path: securityUpgrade.primaryInstance + - displayName: Security Upgrade Initial Instances + path: securityUpgrade.initialInstances + - displayName: Security Upgrade Pending To Restart Instances + path: securityUpgrade.pendingToRestartInstances + - displayName: Security Upgrade Restarted Instances + path: securityUpgrade.restartedInstances + - description: 'An ISO 8601 date indicating if and when the switchover initiated + + ' + displayName: Security Upgrade Switchover Initiated + path: 
securityUpgrade.switchoverInitiated + - description: 'An ISO 8601 date indicating if and when the switchover finalized + + ' + displayName: Security Upgrade Switchover Finalized + path: securityUpgrade.switchoverFinalized + - description: 'A failure message (when available) + + ' + displayName: Security Upgrade Failure + path: securityUpgrade.failure + version: v1 + - description: Multi-tenant logs server, to aggregate Postgres logs. Fully managed + displayName: StackGres Distributed Logs + kind: SGDistributedLogs + name: sgdistributedlogs.stackgres.io + specDescriptors: + - description: "The profile allow to change in a convenient place a set\ + \ of configuration defaults that affect how the cluster is generated.\n\ + \nAll those defaults can be overwritten by setting the correspoinding\ + \ fields.\n\nAvailable profiles are:\n\n* `production`:\n\n Prevents\ + \ two Pods from running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `false` by default).\n Sets both limits and requests using `SGInstanceProfile`\ + \ for `patroni` container that runs both Patroni and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `false` by default).\n Sets requests using the referenced `SGInstanceProfile`\ + \ for sidecar containers other than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `false` by default).\n\n* `testing`:\n\n Allows two Pods to running\ + \ in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `true` by default).\n Sets both limits and requests using `SGInstanceProfile`\ + \ for `patroni` container that runs both Patroni and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `false` by default).\n Sets requests using the referenced `SGInstanceProfile`\ + \ for sidecar containers other than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `false` by default).\n\n* `development`:\n\n Allows two Pods from\ + \ running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `true` by default).\n Unset both limits and requests for `patroni`\ + \ container that runs both Patroni and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `true` by default).\n Unsets requests for sidecar containers other\ + \ than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `true` by default).\n\n**Changing this field may require a restart.**\n" + displayName: Profile + path: profile + - description: 'Size of the PersistentVolume set for the pod of the cluster + for distributed logs. This size is specified either in Mebibytes, Gibibytes + or Tebibytes (multiples of 2^20, 2^30 or 2^40, respectively). + + ' + displayName: Persistent Volume Size + path: persistentVolume.size + - description: 'Name of an existing StorageClass in the Kubernetes cluster, + used to create the PersistentVolumes for the instances of the cluster. + + ' + displayName: Persistent Volume Storage Class + path: persistentVolume.storageClass + - description: Specifies the type of Kubernetes service(`ClusterIP`, `LoadBalancer`, + `NodePort`) + displayName: Postgres Services Primary Type + path: postgresServices.primary.type + - description: allocateLoadBalancerNodePorts defines if NodePorts will be + automatically allocated for services with type LoadBalancer. Default + is "true". 
It may be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific NodePorts (by + specifying a value), those requests will be respected, regardless of + this field. This field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any other type. + displayName: Postgres Services Primary Allocate Load Balancer Node Ports + path: postgresServices.primary.allocateLoadBalancerNodePorts + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Postgres Services Primary External I Ps + path: postgresServices.primary.externalIPs + - description: externalTrafficPolicy describes how nodes distribute service + traffic they receive on one of the Service's "externally-facing" addresses + (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to "Local", the + proxy will configure the service in a way that assumes that external + load balancers will take care of balancing the service traffic between + nodes, and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client source IP. + (Traffic mistakenly sent to a node with no endpoints will be dropped.) + The default value, "Cluster", uses the standard behavior of routing + to all endpoints evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer IP from within + the cluster will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to take traffic policy + into account when picking a node. + displayName: Postgres Services Primary External Traffic Policy + path: postgresServices.primary.externalTrafficPolicy + - description: healthCheckNodePort specifies the healthcheck nodePort for + the service. This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, a value will + be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). This + field cannot be updated once set. + displayName: Postgres Services Primary Health Check Node Port + path: postgresServices.primary.healthCheckNodePort + - description: InternalTrafficPolicy describes how nodes distribute service + traffic they receive on the ClusterIP. If set to "Local", the proxy + will assume that pods only want to talk to endpoints of the service + on the same node as the pod, dropping the traffic if there are no local + endpoints. The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified by topology and + other features). + displayName: Postgres Services Primary Internal Traffic Policy + path: postgresServices.primary.internalTrafficPolicy + - displayName: Postgres Services Primary Ip Families + path: postgresServices.primary.ipFamilies + - description: IPFamilyPolicy represents the dual-stack-ness requested or + required by this Service. If there is no value provided, then this field + will be set to SingleStack. 
Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on dual-stack configured + clusters or a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). + The ipFamilies and clusterIPs fields depend on the value of this field. + This field will be wiped when updating a service to type ExternalName. + displayName: Postgres Services Primary Ip Family Policy + path: postgresServices.primary.ipFamilyPolicy + - description: loadBalancerClass is the class of the load balancer implementation + this Service belongs to. If specified, the value of this field must + be a label-style identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is used, today + this is typically done through the cloud provider integration, but should + apply for any default implementation. If set, it is assumed that a load + balancer implementation is watching for Services with a matching class. + Any default load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only be set when + creating or updating a Service to type 'LoadBalancer'. Once set, it + can not be changed. This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + displayName: Postgres Services Primary Load Balancer Class + path: postgresServices.primary.loadBalancerClass + - description: 'Only applies to Service Type: LoadBalancer. This feature + depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. This field will + be ignored if the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. Users are + encouraged to use implementation-specific annotations when available.' + displayName: Postgres Services Primary Load Balancer IP + path: postgresServices.primary.loadBalancerIP + - displayName: Postgres Services Primary Load Balancer Source Ranges + path: postgresServices.primary.loadBalancerSourceRanges + - description: publishNotReadyAddresses indicates that any agent which deals + with endpoints for this Service should disregard any indications of + ready/not-ready. The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes controllers that + generate Endpoints and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" even if the Pods + themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this + behavior. + displayName: Postgres Services Primary Publish Not Ready Addresses + path: postgresServices.primary.publishNotReadyAddresses + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Supports "ClientIP" and "None". Used to maintain session + affinity. Enable client IP based session affinity. Must be ClientIP + or None. Defaults to None. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + displayName: Postgres Services Primary Session Affinity + path: postgresServices.primary.sessionAffinity + - description: timeoutSeconds specifies the seconds of ClientIP type session + sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + displayName: Postgres Services Primary Session Affinity Config Client + IP Timeout Seconds + path: postgresServices.primary.sessionAffinityConfig.clientIP.timeoutSeconds + - description: the node port that will be exposed to connect to Postgres + instance + displayName: Postgres Services Primary Node Ports Pgport + path: postgresServices.primary.nodePorts.pgport + - description: the node port that will be exposed to connect to Postgres + instance for replication purpose + displayName: Postgres Services Primary Node Ports Replicationport + path: postgresServices.primary.nodePorts.replicationport + - description: Specifies the type of Kubernetes service(`ClusterIP`, `LoadBalancer`, + `NodePort`) + displayName: Postgres Services Replicas Type + path: postgresServices.replicas.type + - description: allocateLoadBalancerNodePorts defines if NodePorts will be + automatically allocated for services with type LoadBalancer. Default + is "true". It may be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific NodePorts (by + specifying a value), those requests will be respected, regardless of + this field. This field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any other type. + displayName: Postgres Services Replicas Allocate Load Balancer Node Ports + path: postgresServices.replicas.allocateLoadBalancerNodePorts + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Postgres Services Replicas External I Ps + path: postgresServices.replicas.externalIPs + - description: externalTrafficPolicy describes how nodes distribute service + traffic they receive on one of the Service's "externally-facing" addresses + (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to "Local", the + proxy will configure the service in a way that assumes that external + load balancers will take care of balancing the service traffic between + nodes, and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client source IP. + (Traffic mistakenly sent to a node with no endpoints will be dropped.) + The default value, "Cluster", uses the standard behavior of routing + to all endpoints evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer IP from within + the cluster will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to take traffic policy + into account when picking a node. + displayName: Postgres Services Replicas External Traffic Policy + path: postgresServices.replicas.externalTrafficPolicy + - description: healthCheckNodePort specifies the healthcheck nodePort for + the service. This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, a value will + be automatically allocated. External systems (e.g. 
load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). This + field cannot be updated once set. + displayName: Postgres Services Replicas Health Check Node Port + path: postgresServices.replicas.healthCheckNodePort + - description: InternalTrafficPolicy describes how nodes distribute service + traffic they receive on the ClusterIP. If set to "Local", the proxy + will assume that pods only want to talk to endpoints of the service + on the same node as the pod, dropping the traffic if there are no local + endpoints. The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified by topology and + other features). + displayName: Postgres Services Replicas Internal Traffic Policy + path: postgresServices.replicas.internalTrafficPolicy + - displayName: Postgres Services Replicas Ip Families + path: postgresServices.replicas.ipFamilies + - description: IPFamilyPolicy represents the dual-stack-ness requested or + required by this Service. If there is no value provided, then this field + will be set to SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on dual-stack configured + clusters or a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). + The ipFamilies and clusterIPs fields depend on the value of this field. + This field will be wiped when updating a service to type ExternalName. + displayName: Postgres Services Replicas Ip Family Policy + path: postgresServices.replicas.ipFamilyPolicy + - description: loadBalancerClass is the class of the load balancer implementation + this Service belongs to. If specified, the value of this field must + be a label-style identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is used, today + this is typically done through the cloud provider integration, but should + apply for any default implementation. If set, it is assumed that a load + balancer implementation is watching for Services with a matching class. + Any default load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only be set when + creating or updating a Service to type 'LoadBalancer'. Once set, it + can not be changed. This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + displayName: Postgres Services Replicas Load Balancer Class + path: postgresServices.replicas.loadBalancerClass + - description: 'Only applies to Service Type: LoadBalancer. This feature + depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. This field will + be ignored if the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. Users are + encouraged to use implementation-specific annotations when available.' 
+ displayName: Postgres Services Replicas Load Balancer IP + path: postgresServices.replicas.loadBalancerIP + - displayName: Postgres Services Replicas Load Balancer Source Ranges + path: postgresServices.replicas.loadBalancerSourceRanges + - description: publishNotReadyAddresses indicates that any agent which deals + with endpoints for this Service should disregard any indications of + ready/not-ready. The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes controllers that + generate Endpoints and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" even if the Pods + themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this + behavior. + displayName: Postgres Services Replicas Publish Not Ready Addresses + path: postgresServices.replicas.publishNotReadyAddresses + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Supports "ClientIP" and "None". Used to maintain session + affinity. Enable client IP based session affinity. Must be ClientIP + or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + displayName: Postgres Services Replicas Session Affinity + path: postgresServices.replicas.sessionAffinity + - description: timeoutSeconds specifies the seconds of ClientIP type session + sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + displayName: Postgres Services Replicas Session Affinity Config Client + IP Timeout Seconds + path: postgresServices.replicas.sessionAffinityConfig.clientIP.timeoutSeconds + - description: the node port that will be exposed to connect to Postgres + instance + displayName: Postgres Services Replicas Node Ports Pgport + path: postgresServices.replicas.nodePorts.pgport + - description: the node port that will be exposed to connect to Postgres + instance for replication purpose + displayName: Postgres Services Replicas Node Ports Replicationport + path: postgresServices.replicas.nodePorts.replicationport + - description: Specify if the `-replicas` service should be created or not. + displayName: Postgres Services Replicas Enabled + path: postgresServices.replicas.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When set to `true` resource limits for containers other + than the patroni container will be set just like for the patroni container + as specified in the SGInstanceProfile. 
+ + + **Changing this field may require a restart.** + + ' + displayName: Resources Enable Cluster Limits Requirements + path: resources.enableClusterLimitsRequirements + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "When set to `true` the resources requests values in fields\ + \ `SGInstanceProfile.spec.requests.cpu` and `SGInstanceProfile.spec.requests.memory`\ + \ will represent the resources\n requests of the patroni container and\ + \ the total resources requests calculated by adding the resources requests\ + \ of all the containers (including the patroni container).\n\n**Changing\ + \ this field may require a restart.**\n" + displayName: Resources Disable Resources Requests Split From Total + path: resources.disableResourcesRequestsSplitFromTotal + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Scheduling Node Selector + path: scheduling.nodeSelector + - description: Effect indicates the taint effect to match. Empty means match + all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + displayName: Scheduling Tolerations Effect + path: scheduling.tolerations.effect + - description: Key is the taint key that the toleration applies to. Empty + means match all taint keys. If the key is empty, operator must be Exists; + this combination means to match all values and all keys. + displayName: Scheduling Tolerations Key + path: scheduling.tolerations.key + - description: Operator represents a key's relationship to the value. Valid + operators are Exists and Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate all taints of a particular + category. + displayName: Scheduling Tolerations Operator + path: scheduling.tolerations.operator + - description: TolerationSeconds represents the period of time the toleration + (which must be of effect NoExecute, otherwise this field is ignored) + tolerates the taint. By default, it is not set, which means tolerate + the taint forever (do not evict). Zero and negative values will be treated + as 0 (evict immediately) by the system. + displayName: Scheduling Tolerations Toleration Seconds + path: scheduling.tolerations.tolerationSeconds + - description: Value is the taint value the toleration matches to. If the + operator is Exists, the value should be empty, otherwise just a regular + string. + displayName: Scheduling Tolerations Value + path: scheduling.tolerations.value + - description: 'Node affinity is a group of node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + displayName: Scheduling Node Affinity + path: scheduling.nodeAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:nodeAffinity + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Scheduling Priority Class Name + path: scheduling.priorityClassName + - description: 'Pod affinity is a group of inter pod affinity scheduling + rules. 
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + displayName: Scheduling Pod Affinity + path: scheduling.podAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAffinity + - description: 'Pod anti affinity is a group of inter pod anti affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + displayName: Scheduling Pod Anti Affinity + path: scheduling.podAntiAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity + - description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/04-postgres-cluster-management/03-resource-profiles/). + A SGInstanceProfile defines CPU and memory limits. Must exist before + creating a distributed logs. When no profile is set, a default (currently: + 1 core, 2 GiB RAM) one is used. + + + **Changing this field may require a restart.** + + ' + displayName: SGInstanceProfile + path: sgInstanceProfile + - description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the distributed logs. It must exist. When not set, a default + Postgres config, for the major version selected, is used. + + + **Changing this field may require a restart.** + + ' + displayName: Configurations SGPostgresConfig + path: configurations.sgPostgresConfig + - displayName: Metadata Annotations All Resources + path: metadata.annotations.allResources + - displayName: Metadata Annotations Cluster Pods + path: metadata.annotations.clusterPods + - displayName: Metadata Annotations Services + path: metadata.annotations.services + - displayName: Metadata Annotations Primary Service + path: metadata.annotations.primaryService + - displayName: Metadata Annotations Replicas Service + path: metadata.annotations.replicasService + - displayName: Metadata Labels Cluster Pods + path: metadata.labels.clusterPods + - displayName: Metadata Labels Services + path: metadata.labels.services + - description: 'It is a best practice, on non-containerized environments, + when running production workloads, to run each database server on a + different server (virtual or physical), i.e., not to co-locate more + than one database server per host. + + + The same best practice applies to databases on containers. By default, + StackGres will not allow to run more than one StackGres or Distributed + Logs pod on a given Kubernetes node. If set to `true` it will allow + more than one StackGres pod per node. + + + **Changing this field may require a restart.** + + ' + displayName: Non Production Options Disable Cluster Pod Anti Affinity + path: nonProductionOptions.disableClusterPodAntiAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'It is a best practice, on containerized environments, when + running production workloads, to enforce container''s resources requirements. + + + The same best practice applies to databases on containers. By default, + StackGres will configure resource requirements for patroni container. + Set this property to true to prevent StackGres from setting patroni + container''s resources requirement. 
+ + + **Changing this field may require a restart.** + + ' + displayName: Non Production Options Disable Patroni Resource Requirements + path: nonProductionOptions.disablePatroniResourceRequirements + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'It is a best practice, on containerized environments, when + running production workloads, to enforce container''s resources requirements. + + + By default, StackGres will configure resource requirements for all the + containers. Set this property to true to prevent StackGres from setting + container''s resources requirements (except for patroni container, see + `disablePatroniResourceRequirements`). + + + **Changing this field may require a restart.** + + ' + displayName: Non Production Options Disable Cluster Resource Requirements + path: nonProductionOptions.disableClusterResourceRequirements + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "**Deprecated** this value is ignored and you can consider\ + \ it as always `true`.\n\nOn containerized environments, when running\ + \ production workloads, enforcing container's cpu requirements request\ + \ to be equals to the limit allow to achieve the highest level of performance.\ + \ Doing so, reduces the chances of leaving\n the workload with less\ + \ cpu than it requires. It also allow to set [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs on the node.\n\ + \nBy default, StackGres will configure cpu requirements to have the\ + \ same limit and request for the patroni container. Set this property\ + \ to true to prevent StackGres from setting patroni container's cpu\ + \ requirements request equals to the limit\n when `.spec.requests.cpu`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n**Changing\ + \ this field may require a restart.**\n" + displayName: Non Production Options Enable Set Patroni Cpu Requests + path: nonProductionOptions.enableSetPatroniCpuRequests + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "**Deprecated** this value is ignored and you can consider\ + \ it as always `true`.\n\nOn containerized environments, when running\ + \ production workloads, enforcing container's cpu requirements request\ + \ to be equals to the limit allow to achieve the highest level of performance.\ + \ Doing so, reduces the chances of leaving\n the workload with less\ + \ cpu than it requires. It also allow to set [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs on the node.\n\ + \nBy default, StackGres will configure cpu requirements to have the\ + \ same limit and request for all the containers. 
Set this property to\ + \ true to prevent StackGres from setting container's cpu requirements\ + \ request equals to the limit (except for patroni container, see `enablePatroniCpuRequests`)\n\ + \ when `.spec.requests.containers..cpu` `.spec.requests.initContainers..cpu` is configured in the referenced `SGInstanceProfile`.\n\n\ + **Changing this field may require a restart.**\n" + displayName: Non Production Options Enable Set Cluster Cpu Requests + path: nonProductionOptions.enableSetClusterCpuRequests + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "**Deprecated** this value is ignored and you can consider\ + \ it as always `true`.\n\nOn containerized environments, when running\ + \ production workloads, enforcing container's memory requirements request\ + \ to be equals to the limit allow to achieve the highest level of performance.\ + \ Doing so, reduces the chances of leaving\n the workload with less\ + \ memory than it requires.\n\nBy default, StackGres will configure memory\ + \ requirements to have the same limit and request for the patroni container.\ + \ Set this property to true to prevent StackGres from setting patroni\ + \ container's memory requirements request equals to the limit\n when\ + \ `.spec.requests.memory` is configured in the referenced `SGInstanceProfile`.\n\ + \n**Changing this field may require a restart.**\n" + displayName: Non Production Options Enable Set Patroni Memory Requests + path: nonProductionOptions.enableSetPatroniMemoryRequests + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "**Deprecated** this value is ignored and you can consider\ + \ it as always `true`.\n\nOn containerized environments, when running\ + \ production workloads, enforcing container's memory requirements request\ + \ to be equals to the limit allow to achieve the highest level of performance.\ + \ Doing so, reduces the chances of leaving\n the workload with less\ + \ memory than it requires.\n\nBy default, StackGres will configure memory\ + \ requirements to have the same limit and request for all the containers.\ + \ Set this property to true to prevent StackGres from setting container's\ + \ memory requirements request equals to the limit (except for patroni\ + \ container, see `enablePatroniCpuRequests`)\n when `.spec.requests.containers..memory` `.spec.requests.initContainers..memory`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n**Changing\ + \ this field may require a restart.**\n" + displayName: Non Production Options Enable Set Cluster Memory Requests + path: nonProductionOptions.enableSetClusterMemoryRequests + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + statusDescriptors: + - description: Last time the condition transitioned from one status to another. + displayName: Conditions Last Transition Time + path: conditions.lastTransitionTime + - description: A human readable message indicating details about the transition. + displayName: Conditions Message + path: conditions.message + - description: The reason for the condition's last transition. + displayName: Conditions Reason + path: conditions.reason + - description: Status of the condition, one of True, False, Unknown. + displayName: Conditions Status + path: conditions.status + - description: Type of deployment condition. 
+ displayName: Conditions Type + path: conditions.type + - description: The used Postgres version + displayName: Postgres Version + path: postgresVersion + - description: The used Timescaledb version + displayName: Timescaledb Version + path: timescaledbVersion + - description: The database name that has been created + displayName: Databases Name + path: databases.name + - description: The retention window that has been applied to tables + displayName: Databases Retention + path: databases.retention + - description: The `sgcluster` namespace + displayName: Connected Clusters Namespace + path: connectedClusters.namespace + - description: The `sgcluster` name + displayName: Connected Clusters Name + path: connectedClusters.name + - description: The `sgdistributedlogs` to which this `sgcluster` is connected + to + displayName: Connected Clusters Config SGDistributedLogs + path: connectedClusters.config.sgDistributedLogs + - description: The retention window that has been applied to tables + displayName: Connected Clusters Config Retention + path: connectedClusters.config.retention + - description: The hash of the configuration file that is used by fluentd + displayName: Fluentd Config Hash + path: fluentdConfigHash + - description: The custom prefix that is prepended to all labels. + displayName: Label Prefix + path: labelPrefix + - description: Flag to indicate the previous existing ConfigMap has been + removed. + displayName: Old Config Map Removed + path: oldConfigMapRemoved + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + version: v1 + - description: Instance Profiles are like "t-shirt" sizes, used for pods sizing + displayName: StackGres Instance Profile + kind: SGInstanceProfile + name: sginstanceprofiles.stackgres.io + specDescriptors: + - description: "CPU(s) (cores) limits for every resource's Pod that reference\ + \ this SGInstanceProfile. The suffix `m`\n specifies millicpus (where\ + \ 1000m is equals to 1).\n\nThe number of cpu limits is assigned to\ + \ the patroni container (that runs both Patroni and PostgreSQL).\n\n\ + A minimum of 2 cpu is recommended.\n" + displayName: Cpu + path: cpu + - description: "RAM limits for every resource's Pod that reference this\ + \ SGInstanceProfile. The suffix `Mi` or `Gi`\n specifies Mebibytes\ + \ or Gibibytes, respectively.\n\nThe amount of RAM limits is assigned\ + \ to the patroni container (that runs both Patroni and PostgreSQL).\n\ + \nA minimum of 2Gi is recommended.\n" + displayName: Memory + path: memory + - description: "RAM limits allocated for huge pages of the patroni container\ + \ (that runs both Patroni and PostgreSQL) with a size of 2Mi. The suffix\ + \ `Mi` or `Gi`\n specifies Mebibytes or Gibibytes, respectively.\n" + displayName: Huge Pages Hugepages-2 Mi + path: hugePages.hugepages-2Mi + - description: "RAM limits allocated for huge pages of the patroni container\ + \ (that runs both Patroni and PostgreSQL) with a size of 1Gi. The suffix\ + \ `Mi` or `Gi`\n specifies Mebibytes or Gibibytes, respectively.\n" + displayName: Huge Pages Hugepages-1 Gi + path: hugePages.hugepages-1Gi + - description: "CPU(s) (cores) limits for the specified container. The suffix\ + \ `m`\n specifies millicpus (where 1000m is equals to 1).\n" + displayName: Containers Cpu + path: containers.cpu + - description: "RAM limits for the specified container. 
The suffix `Mi`\ + \ or `Gi`\n specifies Mebibytes or Gibibytes, respectively.\n" + displayName: Containers Memory + path: containers.memory + - description: "RAM limits for huge pages of the specified container with\ + \ a size of 2Mi. The suffix `Mi`\n or `Gi` specifies Mebibytes or Gibibytes,\ + \ respectively.\n" + displayName: Containers Huge Pages Hugepages-2 Mi + path: containers.hugePages.hugepages-2Mi + - description: "RAM limits for huge pages of the specified container with\ + \ a size of 1Gi. The suffix `Mi`\n or `Gi` specifies Mebibytes or Gibibytes,\ + \ respectively.\n" + displayName: Containers Huge Pages Hugepages-1 Gi + path: containers.hugePages.hugepages-1Gi + - description: "CPU(s) (cores) limits for the specified init container.\ + \ The suffix\n `m` specifies millicpus (where 1000m is equals to 1).\n" + displayName: Init Containers Cpu + path: initContainers.cpu + - description: "RAM limits for the specified init container. The suffix\ + \ `Mi`\n or `Gi` specifies Mebibytes or Gibibytes, respectively.\n" + displayName: Init Containers Memory + path: initContainers.memory + - description: "RAM limits for huge pages of the specified init container\ + \ with a size of 2Mi. The suffix `Mi`\n or `Gi` specifies Mebibytes\ + \ or Gibibytes, respectively.\n" + displayName: Init Containers Huge Pages Hugepages-2 Mi + path: initContainers.hugePages.hugepages-2Mi + - description: "RAM limits for huge pages of the specified init container\ + \ with a size of 1Gi. The suffix `Mi` or `Gi`\n specifies Mebibytes\ + \ or Gibibytes, respectively.\n" + displayName: Init Containers Huge Pages Hugepages-1 Gi + path: initContainers.hugePages.hugepages-1Gi + - description: "CPU(s) (cores) requests for every resource's Pod that reference\ + \ this SGInstanceProfile. The suffix `m`\n specifies millicpus (where\ + \ 1000m is equals to 1).\n\nBy default the cpu requests values in field\ + \ `.spec.requests.cpu` represent the total cpu requests assigned to\ + \ each resource's Pod that reference this SGInstanceProfile.\n The cpu\ + \ requests of the patroni container (that runs both Patroni and PostgreSQL)\ + \ is calculated by subtracting from the total cpu requests the cpu requests\ + \ of other containers that are present in the Pod.\n To change this\ + \ behavior and having the cpu requests values in field `.spec.requests.cpu`\ + \ to represent the cpu requests of the patroni container and the total\ + \ cpu requests\n calculated by adding the cpu requests of all the containers\ + \ (including the patroni container) you may set one or more of the following\ + \ fields to `true`\n (depending on the resource's Pods you need this\ + \ behaviour to be changed):\n \n* `SGCluster.spec.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.coordinator.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.ovewrites.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGDistributedLogs.spec.resources.disableResourcesRequestsSplitFromTotal`\n" + displayName: Requests Cpu + path: requests.cpu + - description: "RAM requests for every resource's Pod that reference this\ + \ SGInstanceProfile. 
The suffix `Mi` or `Gi`\n specifies Mebibytes\ + \ or Gibibytes, respectively.\n\nBy default the memory requests values\ + \ in field `.spec.requests.memory` represent the total memory requests\ + \ assigned to each resource's Pod that reference this SGInstanceProfile.\n\ + \ The memory requests of the patroni container (that runs both Patroni\ + \ and PostgreSQL) is calculated by subtracting from the total memory\ + \ requests the memory requests of other containers that are present\ + \ in the Pod.\n To change this behavior and having the memory requests\ + \ values in field `.spec.requests.memory` to represent the memory requests\ + \ of the patroni container and the total memory requests\n calculated\ + \ by adding the memory requests of all the containers (including the\ + \ patroni container) you may set one or more of the following fields\ + \ to `true`\n (depending on the resource's Pods you need this behaviour\ + \ to be changed):\n \n* `SGCluster.spec.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.coordinator.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.ovewrites.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGDistributedLogs.spec.resources.disableResourcesRequestsSplitFromTotal`\n" + displayName: Requests Memory + path: requests.memory + - description: "CPU(s) (cores) requests for the specified container. The\ + \ suffix `m`\n specifies millicpus (where 1000m is equals to 1).\n" + displayName: Requests Containers Cpu + path: requests.containers.cpu + - description: "RAM requests for the specified container. The suffix `Mi`\ + \ or `Gi`\n specifies Mebibytes or Gibibytes, respectively.\n" + displayName: Requests Containers Memory + path: requests.containers.memory + - description: "CPU(s) (cores) requests for the specified init container.\ + \ The suffix\n `m` specifies millicpus (where 1000m is equals to 1).\n" + displayName: Requests Init Containers Cpu + path: requests.initContainers.cpu + - description: "RAM requests for the specified init container. The suffix\ + \ `Mi`\n or `Gi` specifies Mebibytes or Gibibytes, respectively.\n" + displayName: Requests Init Containers Memory + path: requests.initContainers.memory + version: v1 + - description: Handle to an existing Object Storage (e.g. S3), used to store + backups + displayName: StackGres Object Storage + kind: SGObjectStorage + name: sgobjectstorages.stackgres.io + specDescriptors: + - description: "Determine the type of object storage used for storing the\ + \ base backups and WAL segments.\n Possible values:\n * `s3`:\ + \ Amazon Web Services S3 (Simple Storage Service).\n * `s3Compatible`:\ + \ non-AWS services that implement a compatibility API with AWS S3.\n\ + \ * `gcs`: Google Cloud Storage.\n * `azureBlob`: Microsoft\ + \ Azure Blob Storage.\n" + displayName: Type + path: type + - description: 'AWS S3 bucket name. + + ' + displayName: S3 Bucket + path: s3.bucket + - description: 'The AWS S3 region. The Region may be detected using s3:GetBucketLocation, + but if you wish to avoid giving permissions to this API call or forbid + it from the applicable IAM policy, you must then specify this property. + + ' + displayName: S3 Region + path: s3.region + - description: 'The [Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) + to use for the backup object storage. By default, the `STANDARD` storage + class is used. 
Other supported values include `STANDARD_IA` for Infrequent + Access and `REDUCED_REDUNDANCY`. + + ' + displayName: S3 Storage Class + path: s3.storageClass + - description: 'AWS [access key ID](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + For example, `AKIAIOSFODNN7EXAMPLE`. + + ' + displayName: S3 Aws Credentials Secret Key Selectors Access Key Id + path: s3.awsCredentials.secretKeySelectors.accessKeyId + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: S3 Aws Credentials Secret Key Selectors Access Key Id Key + path: s3.awsCredentials.secretKeySelectors.accessKeyId.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: S3 Aws Credentials Secret Key Selectors Access Key Id Name + path: s3.awsCredentials.secretKeySelectors.accessKeyId.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'AWS [secret access key](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + For example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`. + + ' + displayName: S3 Aws Credentials Secret Key Selectors Secret Access Key + path: s3.awsCredentials.secretKeySelectors.secretAccessKey + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: S3 Aws Credentials Secret Key Selectors Secret Access Key + Key + path: s3.awsCredentials.secretKeySelectors.secretAccessKey.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: S3 Aws Credentials Secret Key Selectors Secret Access Key + Name + path: s3.awsCredentials.secretKeySelectors.secretAccessKey.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'Bucket name. + + ' + displayName: S3 Compatible Bucket + path: s3Compatible.bucket + - description: 'Enable path-style addressing (i.e. `http://s3.amazonaws.com/BUCKET/KEY`) + when connecting to an S3-compatible service that lacks support for sub-domain + style bucket URLs (i.e. `http://BUCKET.s3.amazonaws.com/KEY`). + + + Defaults to false. + + ' + displayName: S3 Compatible Enable Path Style Addressing + path: s3Compatible.enablePathStyleAddressing + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Overrides the default url to connect to an S3-compatible + service. + + For example: `http://s3-like-service:9000`. + + ' + displayName: S3 Compatible Endpoint + path: s3Compatible.endpoint + - description: 'The AWS S3 region. The Region may be detected using s3:GetBucketLocation, + but if you wish to avoid giving permissions to this API call or forbid + it from the applicable IAM policy, you must then specify this property. + + ' + displayName: S3 Compatible Region + path: s3Compatible.region + - description: 'The [Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) + to use for the backup object storage. By default, the `STANDARD` storage + class is used. Other supported values include `STANDARD_IA` for Infrequent + Access and `REDUCED_REDUNDANCY`. 
+ + ' + displayName: S3 Compatible Storage Class + path: s3Compatible.storageClass + - description: 'AWS [access key ID](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + For example, `AKIAIOSFODNN7EXAMPLE`. + + ' + displayName: S3 Compatible Aws Credentials Secret Key Selectors Access + Key Id + path: s3Compatible.awsCredentials.secretKeySelectors.accessKeyId + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: S3 Compatible Aws Credentials Secret Key Selectors Access + Key Id Key + path: s3Compatible.awsCredentials.secretKeySelectors.accessKeyId.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: S3 Compatible Aws Credentials Secret Key Selectors Access + Key Id Name + path: s3Compatible.awsCredentials.secretKeySelectors.accessKeyId.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'AWS [secret access key](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + For example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`. + + ' + displayName: S3 Compatible Aws Credentials Secret Key Selectors Secret + Access Key + path: s3Compatible.awsCredentials.secretKeySelectors.secretAccessKey + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: S3 Compatible Aws Credentials Secret Key Selectors Secret + Access Key Key + path: s3Compatible.awsCredentials.secretKeySelectors.secretAccessKey.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: S3 Compatible Aws Credentials Secret Key Selectors Secret + Access Key Name + path: s3Compatible.awsCredentials.secretKeySelectors.secretAccessKey.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: S3 Compatible Aws Credentials Secret Key Selectors Ca Certificate + Key + path: s3Compatible.awsCredentials.secretKeySelectors.caCertificate.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: S3 Compatible Aws Credentials Secret Key Selectors Ca Certificate + Name + path: s3Compatible.awsCredentials.secretKeySelectors.caCertificate.name + - description: 'GCS bucket name. + + ' + displayName: Gcs Bucket + path: gcs.bucket + - description: 'If true, the credentials will be fetched from the GCE/GKE + metadata service and the field `secretKeySelectors` have to be set to + null or omitted. + + + This is useful when running StackGres inside a GKE cluster using [Workload + Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). + + ' + displayName: Gcs Gcp Credentials Fetch Credentials From Metadata Service + path: gcs.gcpCredentials.fetchCredentialsFromMetadataService + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'A service account key from GCP. In JSON format, as downloaded + from the GCP Console. 
+ + ' + displayName: Gcs Gcp Credentials Secret Key Selectors Service Account + JSON + path: gcs.gcpCredentials.secretKeySelectors.serviceAccountJSON + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: Gcs Gcp Credentials Secret Key Selectors Service Account + JSON Key + path: gcs.gcpCredentials.secretKeySelectors.serviceAccountJSON.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: Gcs Gcp Credentials Secret Key Selectors Service Account + JSON Name + path: gcs.gcpCredentials.secretKeySelectors.serviceAccountJSON.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'Azure Blob Storage bucket name. + + ' + displayName: Azure Blob Bucket + path: azureBlob.bucket + - description: 'The [Storage Account](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview?toc=/azure/storage/blobs/toc.json) + that contains the Blob bucket to be used. + + ' + displayName: Azure Blob Azure Credentials Secret Key Selectors Storage + Account + path: azureBlob.azureCredentials.secretKeySelectors.storageAccount + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: Azure Blob Azure Credentials Secret Key Selectors Storage + Account Key + path: azureBlob.azureCredentials.secretKeySelectors.storageAccount.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: Azure Blob Azure Credentials Secret Key Selectors Storage + Account Name + path: azureBlob.azureCredentials.secretKeySelectors.storageAccount.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'The [storage account access key](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal). + + ' + displayName: Azure Blob Azure Credentials Secret Key Selectors Access + Key + path: azureBlob.azureCredentials.secretKeySelectors.accessKey + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The key of the secret to select from. Must be a valid secret + key. + + ' + displayName: Azure Blob Azure Credentials Secret Key Selectors Access + Key Key + path: azureBlob.azureCredentials.secretKeySelectors.accessKey.key + - description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + displayName: Azure Blob Azure Credentials Secret Key Selectors Access + Key Name + path: azureBlob.azureCredentials.secretKeySelectors.accessKey.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + version: v1beta1 + - description: Strongly validated customized Postgres configuration (postgresql.conf) + displayName: StackGres Postgres Configuration + kind: SGPostgresConfig + name: sgpgconfigs.stackgres.io + specDescriptors: + - description: 'The **major** Postgres version the configuration is for. + Postgres major versions contain one number starting with version 10 + (`10`, `11`, `12`, etc), and two numbers separated by a dot for previous + versions (`9.6`, `9.5`, etc). 
+ + + Note that Postgres maintains full compatibility across minor versions, + and hence a configuration for a given major version will work for any + minor version of that same major version. + + + Check [StackGres component versions](https://stackgres.io/doc/latest/intro/versions) + to see the Postgres versions supported by this version of StackGres. + + ' + displayName: Postgres Version + path: postgresVersion + - displayName: Postgresql Conf + path: postgresql\.conf + statusDescriptors: + - displayName: Default Parameters + path: defaultParameters + version: v1 + - description: Customized PgBouncer (connection pooler) configuration + displayName: StackGres Connection Pooling Configuration + kind: SGPoolingConfig + name: sgpoolconfigs.stackgres.io + statusDescriptors: + - displayName: Pg Bouncer Default Parameters + path: pgBouncer.defaultParameters + version: v1 + - description: Managed SQL Scripts, used for initial SQL commands or migrations + displayName: StackGres Script + kind: SGScript + name: sgscripts.stackgres.io + specDescriptors: + - description: 'If `true` the versions will be managed by the operator automatically. + The user will still be able to update them if needed. `true` by default. + + ' + displayName: Managed Versions + path: managedVersions + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If `true`, when any script entry fail will not prevent subsequent + script entries from being executed. `false` by default. + + ' + displayName: Continue On Error + path: continueOnError + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Name of the script. Must be unique across this SGScript. + + ' + displayName: Scripts Name + path: scripts.name + - description: 'The id is immutable and must be unique across all the script + entries. It is replaced by the operator and is used to identify the + script for the whole life of the `SGScript` object. + + ' + displayName: Scripts Id + path: scripts.id + - description: 'Version of the script. It will allow to identify if this + script entry has been changed. + + ' + displayName: Scripts Version + path: scripts.version + - description: 'Database where the script is executed. Defaults to the `postgres` + database, if not specified. + + ' + displayName: Scripts Database + path: scripts.database + - description: 'User that will execute the script. Defaults to the `postgres` + user. + + ' + displayName: Scripts User + path: scripts.user + - description: 'Wrap the script in a transaction using the specified transaction + mode: + + + * `read-committed`: The script will be wrapped in a transaction using + [READ COMMITTED](https://www.postgresql.org/docs/current/transaction-iso.html#XACT-READ-COMMITTED) + isolation level. + + * `repeatable-read`: The script will be wrapped in a transaction using + [REPEATABLE READ](https://www.postgresql.org/docs/current/transaction-iso.html#XACT-REPEATABLE-READ) + isolation level. + + * `serializable`: The script will be wrapped in a transaction using + [SERIALIZABLE](https://www.postgresql.org/docs/current/transaction-iso.html#XACT-SERIALIZABLE) + isolation level. 
+ + + If not set the script entry will not be wrapped in a transaction + + ' + displayName: Scripts Wrap In Transaction + path: scripts.wrapInTransaction + - description: "When set to `true` the script entry execution will include\ + \ storing the status of the execution of this\n script entry in the\ + \ table `managed_sql.status` that will be created in the specified `database`.\ + \ This\n will avoid an operation that fails partially to be unrecoverable\ + \ requiring the intervention from the user\n if user in conjunction\ + \ with `retryOnError`.\n\nIf set to `true` then `wrapInTransaction`\ + \ field must be set.\n\nThis is `false` by default.\n" + displayName: Scripts Store Status In Database + path: scripts.storeStatusInDatabase + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "If not set or set to `false` the script entry will not be\ + \ retried if it fails.\n\nWhen set to `true` the script execution will\ + \ be retried with an exponential backoff of 5 minutes,\n starting from\ + \ 10 seconds and a standard deviation of 10 seconds.\n\nThis is `false`\ + \ by default.\n" + displayName: Scripts Retry On Error + path: scripts.retryOnError + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Raw SQL script to execute. This field is mutually exclusive + with `scriptFrom` field. + + ' + displayName: Scripts Script + path: scripts.script + - description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the SQL script to execute. This field is mutually exclusive + with `configMapKeyRef` field. + + ' + displayName: Scripts Script From Secret Key Ref + path: scripts.scriptFrom.secretKeyRef + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Scripts Script From Secret Key Ref Name + path: scripts.scriptFrom.secretKeyRef.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Scripts Script From Secret Key Ref Key + path: scripts.scriptFrom.secretKeyRef.key + - description: 'A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + reference that contains the SQL script to execute. This field is mutually + exclusive with `secretKeyRef` field. + + ' + displayName: Scripts Script From Config Map Key Ref + path: scripts.scriptFrom.configMapKeyRef + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap + - description: 'The name of the ConfigMap that contains the SQL script to + execute. + + ' + displayName: Scripts Script From Config Map Key Ref Name + path: scripts.scriptFrom.configMapKeyRef.name + - description: 'The key name within the ConfigMap that contains the SQL + script to execute. + + ' + displayName: Scripts Script From Config Map Key Ref Key + path: scripts.scriptFrom.configMapKeyRef.key + statusDescriptors: + - description: 'The id that identifies a script entry. + + ' + displayName: Scripts Id + path: scripts.id + - description: 'The hash of a ConfigMap or Secret referenced with the associated + script entry. 
+ + ' + displayName: Scripts Hash + path: scripts.hash + version: v1 + - description: Handle to a performed (or to be performed, if run manually) backup + for sharded clusters + displayName: StackGres Sharded Backup + kind: SGShardedBackup + name: sgshardedbackups.stackgres.io + specDescriptors: + - description: "The name of the `SGShardedCluster` from which this sharded\ + \ backup is/will be taken.\n\nIf this is a copy of an existing completed\ + \ sharded backup in a different namespace\n the value must be prefixed\ + \ with the namespace of the source backup and a\n dot `.` (e.g. `.`) or have the same value\n\ + \ if the source sharded backup is also a copy.\n" + displayName: Target SGShardedCluster + path: sgShardedCluster + - description: "Indicate if this sharded backup is permanent and should\ + \ not be removed by the automated\n retention policy. Default is `false`.\n" + displayName: Managed Lifecycle + path: managedLifecycle + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Allow to set a timeout for the backup creation. + + + If not set it will be disabled and the backup operation will continue + until the backup completes or fail. If set to 0 is the same as not being + set. + + + Make sure to set a reasonable high value in order to allow for any unexpected + delays during backup creation (network low bandwidth, disk low throughput + and so forth). + + ' + displayName: Timeout + path: timeout + - description: "Allow to set a timeout for the reconciliation process that\ + \ take place after the backup.\n\nIf not set defaults to 300 (5 minutes).\ + \ If set to 0 it will disable timeout.\n\nFailure of reconciliation\ + \ will not make the backup fail and will be re-tried the next time a\ + \ SGBackup\n or shecduled backup Job take place.\n" + displayName: Reconciliation Timeout + path: reconciliationTimeout + - description: 'The maximum number of retries the backup operation is allowed + to do after a failure. + + + A value of `0` (zero) means no retries are made. Defaults to: `3`. + + ' + displayName: Max Retries + path: maxRetries + statusDescriptors: + - description: 'One of the SGBackups that compose the SGShardedBackup used + to restore the sharded cluster. + + ' + displayName: SGBackups + path: sgBackups + - description: 'Status of the sharded backup. + + ' + displayName: Process Status + path: process.status + x-descriptors: + - urn:alm:descriptor:io.kubernetes.phase + - description: 'If the status is `failed` this field will contain a message + indicating the failure reason. + + ' + displayName: Process Failure + path: process.failure + - description: 'Name of the pod assigned to the sharded backup. StackGres + utilizes internally a locking mechanism based on the pod name of the + job that creates the sharded backup. + + ' + displayName: Process Job Pod + path: process.jobPod + - description: 'Start time of sharded backup. + + ' + displayName: Process Timing Start + path: process.timing.start + - description: 'End time of sharded backup. + + ' + displayName: Process Timing End + path: process.timing.end + - description: 'Time at which the sharded backup is safely stored in the + object storage. + + ' + displayName: Process Timing Stored + path: process.timing.stored + - description: 'Postgres version of the server where the sharded backup + is taken from. + + ' + displayName: Backup Information Postgres Version + path: backupInformation.postgresVersion + - description: 'Size (in bytes) of the uncompressed sharded backup. 
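# --- Illustrative example (editorial note, not part of the generated manifest) ---
# A minimal SGShardedBackup sketch based on the spec descriptor paths above
# (sgShardedCluster, managedLifecycle, timeout, reconciliationTimeout, maxRetries).
# Names and values are placeholders.
#
# apiVersion: stackgres.io/v1
# kind: SGShardedBackup
# metadata:
#   name: nightly-sharded-backup        # hypothetical name
# spec:
#   sgShardedCluster: my-sharded-cluster  # target SGShardedCluster
#   managedLifecycle: true               # subject to the automated retention policy
#   timeout: 3600                        # unset disables the timeout; 0 behaves as unset
#   reconciliationTimeout: 300           # default shown in the description above
#   maxRetries: 3                        # default shown in the description above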
+ + ' + displayName: Backup Information Size Uncompressed + path: backupInformation.size.uncompressed + - description: 'Size (in bytes) of the compressed sharded backup. + + ' + displayName: Backup Information Size Compressed + path: backupInformation.size.compressed + version: v1 + - description: Manages Postgres sharded clusters (two or more SGClusters) + displayName: StackGres Sharded Cluster + kind: SGShardedCluster + name: sgshardedclusters.stackgres.io + specDescriptors: + - description: "The profile allow to change in a convenient place a set\ + \ of configuration defaults that affect how the cluster is generated.\n\ + \nAll those defaults can be overwritten by setting the correspoinding\ + \ fields.\n\nAvailable profiles are:\n\n* `production`:\n\n Prevents\ + \ two Pods from running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `false` by default).\n Sets both limits and requests using `SGInstanceProfile`\ + \ for `patroni` container that runs both Patroni and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `false` by default).\n Sets requests using the referenced `SGInstanceProfile`\ + \ for sidecar containers other than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `false` by default).\n\n* `testing`:\n\n Allows two Pods to running\ + \ in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `true` by default).\n Sets both limits and requests using `SGInstanceProfile`\ + \ for `patroni` container that runs both Patroni and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `false` by default).\n Sets requests using the referenced `SGInstanceProfile`\ + \ for sidecar containers other than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `false` by default).\n\n* `development`:\n\n Allows two Pods from\ + \ running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `true` by default).\n Unset both limits and requests for `patroni`\ + \ container that runs both Patroni and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `true` by default).\n Unsets requests for sidecar containers other\ + \ than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `true` by default).\n\n**Changing this field may require a restart.**\n" + displayName: Profile + path: profile + - description: 'The sharding technology that will be used for the sharded + cluster. + + + Available technologies are: + + + * `citus` + + * `ddp` + + * `shardingsphere` + + + **Citus** + + + Citus is a PostgreSQL extension that transforms Postgres into a distributed + database—so you can achieve high performance at any scale. + + + See also https://github.com/citusdata/citus + + + **DDP** + + + DDP (Distributed Data Partitioning) allows you to distribute data across + different physical nodes to improve the query performance of high data + volumes, taking advantage of distinct nodes’ resources. Using the entry + point named coordinator in charge of sending/distributing the queries + to different nodes named shards. + + + **ShardingSphere** + + + Apache ShardingSphere is an ecosystem to transform any database into + a distributed database system, and enhance it with sharding, elastic + scaling, encryption features & more. 
+ + + StackGres implementation of ShardingSphere as a sharding technology + uses the [ShardingSphere Proxy](https://shardingsphere.apache.org/document/current/en/quick-start/shardingsphere-proxy-quick-start/) + as an entry point to distribute SQL traffic among the shards. + + + This implementation requires the [ShardingSphere Operator](https://shardingsphere.apache.org/oncloud/current/en/user-manual/cn-sn-operator/) + to be installed and will create a ComputeNode + + ' + displayName: Type + path: type + - description: 'The database name that will be created and used across all + node and where "partitioned" (distributed) tables will live in. + + ' + displayName: Database + path: database + - description: 'Postgres version used on the cluster. It is either of: + + * The string ''latest'', which automatically sets the latest major.minor + Postgres version. + + * A major version, like ''14'' or ''13'', which sets that major version + and the latest minor version. + + * A specific major.minor version, like ''14.4''. + + ' + displayName: Postgres Version + path: postgres.version + - description: 'Postgres flavor used on the cluster. It is either of: + + * `babelfish` will use the [Babelfish for Postgres](https://babelfish-for-postgresql.github.io/babelfish-for-postgresql/). + + + If not specified then the vanilla Postgres will be used for the cluster. + + + **This field can only be set on creation.** + + ' + displayName: Postgres Flavor + path: postgres.flavor + - description: The name of the extension to deploy. + displayName: Postgres Extensions Name + path: postgres.extensions.name + - description: The id of the publisher of the extension to deploy. If not + specified `com.ongres` will be used by default. + displayName: Postgres Extensions Publisher + path: postgres.extensions.publisher + - description: The version of the extension to deploy. If not specified + version of `stable` channel will be used by default. + displayName: Postgres Extensions Version + path: postgres.extensions.version + - description: 'The repository base URL from where to obtain the extension + to deploy. + + + **This section is filled by the operator.** + + ' + displayName: Postgres Extensions Repository + path: postgres.extensions.repository + - description: 'Allow to enable SSL for connections to Postgres. By default + is `true`. + + + If `true` certificate and private key will be auto-generated unless + fields `certificateSecretKeySelector` and `privateKeySecretKeySelector` + are specified. + + ' + displayName: Postgres Ssl Enabled + path: postgres.ssl.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Secret key selector for the certificate or certificate chain + used for SSL connections. 
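# --- Illustrative example (editorial note, not part of the generated manifest) ---
# A partial SGShardedCluster sketch using the fields described above (profile, type,
# database, postgres.version, postgres.ssl). The apiVersion shown is an assumption;
# verify it against the served version of the sgshardedclusters.stackgres.io CRD
# shipped with this bundle. All names are placeholders.
#
# apiVersion: stackgres.io/v1alpha1     # assumed; check the CRD served version
# kind: SGShardedCluster
# metadata:
#   name: example-sharded               # hypothetical name
# spec:
#   profile: production                 # production | testing | development
#   type: citus                         # citus | ddp | shardingsphere
#   database: appdb                     # database holding the distributed tables
#   postgres:
#     version: 'latest'                 # or a major ('14') or major.minor ('14.4')
#     ssl:
#       enabled: true                   # cert/key auto-generated unless selectors are set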
+ + ' + displayName: Postgres Ssl Certificate Secret Key Selector + path: postgres.ssl.certificateSecretKeySelector + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The name of Secret that contains the certificate or certificate + chain for SSL connections + + ' + displayName: Postgres Ssl Certificate Secret Key Selector Name + path: postgres.ssl.certificateSecretKeySelector.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'The key of Secret that contains the certificate or certificate + chain for SSL connections + + ' + displayName: Postgres Ssl Certificate Secret Key Selector Key + path: postgres.ssl.certificateSecretKeySelector.key + - description: 'Secret key selector for the private key used for SSL connections. + + ' + displayName: Postgres Ssl Private Key Secret Key Selector + path: postgres.ssl.privateKeySecretKeySelector + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The name of Secret that contains the private key for SSL + connections + + ' + displayName: Postgres Ssl Private Key Secret Key Selector Name + path: postgres.ssl.privateKeySecretKeySelector.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: 'The key of Secret that contains the private key for SSL + connections + + ' + displayName: Postgres Ssl Private Key Secret Key Selector Key + path: postgres.ssl.privateKeySecretKeySelector.key + - description: "The replication mode applied to the whole cluster.\nPossible\ + \ values are:\n* `async` (default)\n* `sync`\n* `strict-sync`\n* `sync-all`\n\ + * `strict-sync-all`\n\n**async**\n\nWhen in asynchronous mode the cluster\ + \ is allowed to lose some committed transactions.\n When the primary\ + \ server fails or becomes unavailable for any other reason a sufficiently\ + \ healthy standby\n will automatically be promoted to primary. Any\ + \ transactions that have not been replicated to that standby\n remain\ + \ in a \"forked timeline\" on the primary, and are effectively unrecoverable\ + \ (the data is still there,\n but recovering it requires a manual recovery\ + \ effort by data recovery specialists).\n\n**sync**\n\nWhen in synchronous\ + \ mode a standby will not be promoted unless it is certain that the\ + \ standby contains all\n transactions that may have returned a successful\ + \ commit status to client (clients can change the behavior\n per transaction\ + \ using PostgreSQL’s `synchronous_commit` setting. Transactions with\ + \ `synchronous_commit`\n values of `off` and `local` may be lost on\ + \ fail over, but will not be blocked by replication delays). This\n\ + \ means that the system may be unavailable for writes even though some\ + \ servers are available. System\n administrators can still use manual\ + \ failover commands to promote a standby even if it results in transaction\n\ + \ loss.\n\nSynchronous mode does not guarantee multi node durability\ + \ of commits under all circumstances. When no suitable\n standby is\ + \ available, primary server will still accept writes, but does not guarantee\ + \ their replication. When\n the primary fails in this mode no standby\ + \ will be promoted. When the host that used to be the primary comes\n\ + \ back it will get promoted automatically, unless system administrator\ + \ performed a manual failover. 
This behavior\n makes synchronous mode\ + \ usable with 2 node clusters.\n\nWhen synchronous mode is used and\ + \ a standby crashes, commits will block until the primary is switched\ + \ to standalone\n mode. Manually shutting down or restarting a standby\ + \ will not cause a commit service interruption. Standby will\n signal\ + \ the primary to release itself from synchronous standby duties before\ + \ PostgreSQL shutdown is initiated.\n\n**strict-sync**\n\nWhen it is\ + \ absolutely necessary to guarantee that each write is stored durably\ + \ on at least two nodes, use the strict\n synchronous mode. This mode\ + \ prevents synchronous replication to be switched off on the primary\ + \ when no synchronous\n standby candidates are available. As a downside,\ + \ the primary will not be available for writes (unless the Postgres\n\ + \ transaction explicitly turns off `synchronous_mode` parameter), blocking\ + \ all client write requests until at least one\n synchronous replica\ + \ comes up.\n\n**Note**: Because of the way synchronous replication\ + \ is implemented in PostgreSQL it is still possible to lose\n transactions\ + \ even when using strict synchronous mode. If the PostgreSQL backend\ + \ is cancelled while waiting to acknowledge\n replication (as a result\ + \ of packet cancellation due to client timeout or backend failure) transaction\ + \ changes become\n visible for other backends. Such changes are not\ + \ yet replicated and may be lost in case of standby promotion.\n\n**sync-all**\n\ + \nThe same as `sync` but `syncInstances` is ignored and the number of\ + \ synchronous instances is equals to the total number\n of instances\ + \ less one.\n\n**strict-sync-all**\n\nThe same as `strict-sync` but\ + \ `syncInstances` is ignored and the number of synchronous instances\ + \ is equals to the total number\n of instances less one.\n" + displayName: Replication Mode + path: replication.mode + - description: "Number of synchronous standby instances. Must be less than\ + \ the total number of instances. It is set to 1 by default.\n Only\ + \ setteable if mode is `sync` or `strict-sync`.\n" + displayName: Replication Sync Instances + path: replication.syncInstances + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - description: "Allow to specify how the replicas are initialized.\n\nPossible\ + \ values are:\n\n* `FromPrimary`: When this mode is used replicas will\ + \ be always created from the primary using `pg_basebackup`.\n* `FromReplica`:\ + \ When this mode is used replicas will be created from another existing\ + \ replica using\n `pg_basebackup`. Fallsback to `FromPrimary` if there's\ + \ no replica or it fails.\n* `FromExistingBackup`: When this mode is\ + \ used replicas will be created from an existing SGBackup. If `backupNewerThan`\ + \ is set\n the SGBackup must be newer than its value. 
When this mode\ + \ fails to restore an SGBackup it will try with a previous one (if exists).\n\ + \ Fallsback to `FromReplica` if there's no backup left or it fails.\n\ + * `FromNewlyCreatedBackup`: When this mode is used replicas will be\ + \ created from a newly created SGBackup.\n Fallsback to `FromExistingBackup`\ + \ if `backupNewerThan` is set and exists a recent backup newer than\ + \ its value or it fails.\n" + displayName: Replication Initialization Mode + path: replication.initialization.mode + - description: "An ISO 8601 duration in the format `PnDTnHnMn.nS`, that\ + \ specifies how old an SGBackup have to be in order to be seleceted\n\ + \ to initialize a replica.\n\nWhen `FromExistingBackup` mode is set\ + \ this field restrict the selection of SGBackup to be used for recovery\ + \ newer than the\n specified value. \n\nWhen `FromNewlyCreatedBackup`\ + \ mode is set this field skip the creation SGBackup to be used for recovery\ + \ if one newer than\n the specified value exists. \n" + displayName: Replication Initialization Backup Newer Than + path: replication.initialization.backupNewerThan + - description: 'Maximum storage upload bandwidth used when storing a backup. + In bytes (per second). + + ' + displayName: Replication Initialization Backup Restore Performance Max + Network Bandwidth + path: replication.initialization.backupRestorePerformance.maxNetworkBandwidth + - description: 'Maximum disk read I/O when performing a backup. In bytes + (per second). + + ' + displayName: Replication Initialization Backup Restore Performance Max + Disk Bandwidth + path: replication.initialization.backupRestorePerformance.maxDiskBandwidth + - description: 'Backup storage may use several concurrent streams to read + the data. This parameter configures the number of parallel streams to + use. By default, it''s set to the minimum between the number of file + to read and 10. + + ' + displayName: Replication Initialization Backup Restore Performance Download + Concurrency + path: replication.initialization.backupRestorePerformance.downloadConcurrency + - description: Specify if the service should be created or not. + displayName: Postgres Services Coordinator Any Enabled + path: postgresServices.coordinator.any.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'type determines how the Service is exposed. Defaults to + ClusterIP. Valid + + options are ClusterIP, NodePort, and LoadBalancer. "ClusterIP" allocates + + a cluster-internal IP address for load-balancing to endpoints. + + "NodePort" builds on ClusterIP and allocates a port on every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current cloud). + + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + displayName: Postgres Services Coordinator Any Type + path: postgresServices.coordinator.any.type + - description: allocateLoadBalancerNodePorts defines if NodePorts will be + automatically allocated for services with type LoadBalancer. Default + is "true". It may be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific NodePorts (by + specifying a value), those requests will be respected, regardless of + this field. This field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any other type. 
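# --- Illustrative example (editorial note, not part of the generated manifest) ---
# A replication fragment for an SGShardedCluster spec, assembled from the
# replication.* descriptor paths above. Values are examples, not shipped defaults.
#
# spec:
#   replication:
#     mode: sync                        # async | sync | strict-sync | sync-all | strict-sync-all
#     syncInstances: 1                  # only settable with sync / strict-sync
#     initialization:
#       mode: FromExistingBackup        # falls back to FromReplica as described above
#       backupNewerThan: P1D            # ISO 8601 duration: only consider backups newer than 1 day
#       backupRestorePerformance:
#         maxNetworkBandwidth: 104857600   # bytes per second
#         downloadConcurrency: 4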
+ displayName: Postgres Services Coordinator Any Allocate Load Balancer + Node Ports + path: postgresServices.coordinator.any.allocateLoadBalancerNodePorts + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Postgres Services Coordinator Any External I Ps + path: postgresServices.coordinator.any.externalIPs + - description: externalTrafficPolicy describes how nodes distribute service + traffic they receive on one of the Service's "externally-facing" addresses + (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to "Local", the + proxy will configure the service in a way that assumes that external + load balancers will take care of balancing the service traffic between + nodes, and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client source IP. + (Traffic mistakenly sent to a node with no endpoints will be dropped.) + The default value, "Cluster", uses the standard behavior of routing + to all endpoints evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer IP from within + the cluster will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to take traffic policy + into account when picking a node. + displayName: Postgres Services Coordinator Any External Traffic Policy + path: postgresServices.coordinator.any.externalTrafficPolicy + - description: healthCheckNodePort specifies the healthcheck nodePort for + the service. This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, a value will + be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). This + field cannot be updated once set. + displayName: Postgres Services Coordinator Any Health Check Node Port + path: postgresServices.coordinator.any.healthCheckNodePort + - description: InternalTrafficPolicy describes how nodes distribute service + traffic they receive on the ClusterIP. If set to "Local", the proxy + will assume that pods only want to talk to endpoints of the service + on the same node as the pod, dropping the traffic if there are no local + endpoints. The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified by topology and + other features). + displayName: Postgres Services Coordinator Any Internal Traffic Policy + path: postgresServices.coordinator.any.internalTrafficPolicy + - displayName: Postgres Services Coordinator Any Ip Families + path: postgresServices.coordinator.any.ipFamilies + - description: IPFamilyPolicy represents the dual-stack-ness requested or + required by this Service. If there is no value provided, then this field + will be set to SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on dual-stack configured + clusters or a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). + The ipFamilies and clusterIPs fields depend on the value of this field. 
+ This field will be wiped when updating a service to type ExternalName. + displayName: Postgres Services Coordinator Any Ip Family Policy + path: postgresServices.coordinator.any.ipFamilyPolicy + - description: loadBalancerClass is the class of the load balancer implementation + this Service belongs to. If specified, the value of this field must + be a label-style identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is used, today + this is typically done through the cloud provider integration, but should + apply for any default implementation. If set, it is assumed that a load + balancer implementation is watching for Services with a matching class. + Any default load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only be set when + creating or updating a Service to type 'LoadBalancer'. Once set, it + can not be changed. This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + displayName: Postgres Services Coordinator Any Load Balancer Class + path: postgresServices.coordinator.any.loadBalancerClass + - description: 'Only applies to Service Type: LoadBalancer. This feature + depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. This field will + be ignored if the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. Users are + encouraged to use implementation-specific annotations when available.' + displayName: Postgres Services Coordinator Any Load Balancer IP + path: postgresServices.coordinator.any.loadBalancerIP + - displayName: Postgres Services Coordinator Any Load Balancer Source Ranges + path: postgresServices.coordinator.any.loadBalancerSourceRanges + - description: publishNotReadyAddresses indicates that any agent which deals + with endpoints for this Service should disregard any indications of + ready/not-ready. The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes controllers that + generate Endpoints and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" even if the Pods + themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this + behavior. + displayName: Postgres Services Coordinator Any Publish Not Ready Addresses + path: postgresServices.coordinator.any.publishNotReadyAddresses + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Supports "ClientIP" and "None". Used to maintain session + affinity. Enable client IP based session affinity. Must be ClientIP + or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + displayName: Postgres Services Coordinator Any Session Affinity + path: postgresServices.coordinator.any.sessionAffinity + - description: timeoutSeconds specifies the seconds of ClientIP type session + sticky time. 
The value must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + displayName: Postgres Services Coordinator Any Session Affinity Config + Client IP Timeout Seconds + path: postgresServices.coordinator.any.sessionAffinityConfig.clientIP.timeoutSeconds + - description: the node port that will be exposed to connect to Postgres + instance + displayName: Postgres Services Coordinator Any Node Ports Pgport + path: postgresServices.coordinator.any.nodePorts.pgport + - description: the node port that will be exposed to connect to Postgres + instance for replication purpose + displayName: Postgres Services Coordinator Any Node Ports Replicationport + path: postgresServices.coordinator.any.nodePorts.replicationport + - description: Specify if the service should be created or not. + displayName: Postgres Services Coordinator Primary Enabled + path: postgresServices.coordinator.primary.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'type determines how the Service is exposed. Defaults to + ClusterIP. Valid + + options are ClusterIP, NodePort, and LoadBalancer. "ClusterIP" allocates + + a cluster-internal IP address for load-balancing to endpoints. + + "NodePort" builds on ClusterIP and allocates a port on every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current cloud). + + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + displayName: Postgres Services Coordinator Primary Type + path: postgresServices.coordinator.primary.type + - description: allocateLoadBalancerNodePorts defines if NodePorts will be + automatically allocated for services with type LoadBalancer. Default + is "true". It may be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific NodePorts (by + specifying a value), those requests will be respected, regardless of + this field. This field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any other type. + displayName: Postgres Services Coordinator Primary Allocate Load Balancer + Node Ports + path: postgresServices.coordinator.primary.allocateLoadBalancerNodePorts + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Postgres Services Coordinator Primary External I Ps + path: postgresServices.coordinator.primary.externalIPs + - description: externalTrafficPolicy describes how nodes distribute service + traffic they receive on one of the Service's "externally-facing" addresses + (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to "Local", the + proxy will configure the service in a way that assumes that external + load balancers will take care of balancing the service traffic between + nodes, and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client source IP. + (Traffic mistakenly sent to a node with no endpoints will be dropped.) + The default value, "Cluster", uses the standard behavior of routing + to all endpoints evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer IP from within + the cluster will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to take traffic policy + into account when picking a node. 
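# --- Illustrative example (editorial note, not part of the generated manifest) ---
# A postgresServices fragment for the coordinator, based on the descriptor paths
# above. Port numbers and affinity settings are placeholders.
#
# spec:
#   postgresServices:
#     coordinator:
#       any:
#         enabled: true
#         type: NodePort                # ClusterIP | NodePort | LoadBalancer
#         sessionAffinity: ClientIP     # or None (the default)
#         sessionAffinityConfig:
#           clientIP:
#             timeoutSeconds: 10800     # default: 3 hours
#         nodePorts:
#           pgport: 30432               # hypothetical node port for Postgres connections
#       primary:
#         enabled: true
#         type: ClusterIP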
+ displayName: Postgres Services Coordinator Primary External Traffic Policy + path: postgresServices.coordinator.primary.externalTrafficPolicy + - description: healthCheckNodePort specifies the healthcheck nodePort for + the service. This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, a value will + be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). This + field cannot be updated once set. + displayName: Postgres Services Coordinator Primary Health Check Node Port + path: postgresServices.coordinator.primary.healthCheckNodePort + - description: InternalTrafficPolicy describes how nodes distribute service + traffic they receive on the ClusterIP. If set to "Local", the proxy + will assume that pods only want to talk to endpoints of the service + on the same node as the pod, dropping the traffic if there are no local + endpoints. The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified by topology and + other features). + displayName: Postgres Services Coordinator Primary Internal Traffic Policy + path: postgresServices.coordinator.primary.internalTrafficPolicy + - displayName: Postgres Services Coordinator Primary Ip Families + path: postgresServices.coordinator.primary.ipFamilies + - description: IPFamilyPolicy represents the dual-stack-ness requested or + required by this Service. If there is no value provided, then this field + will be set to SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on dual-stack configured + clusters or a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). + The ipFamilies and clusterIPs fields depend on the value of this field. + This field will be wiped when updating a service to type ExternalName. + displayName: Postgres Services Coordinator Primary Ip Family Policy + path: postgresServices.coordinator.primary.ipFamilyPolicy + - description: loadBalancerClass is the class of the load balancer implementation + this Service belongs to. If specified, the value of this field must + be a label-style identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is used, today + this is typically done through the cloud provider integration, but should + apply for any default implementation. If set, it is assumed that a load + balancer implementation is watching for Services with a matching class. + Any default load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only be set when + creating or updating a Service to type 'LoadBalancer'. Once set, it + can not be changed. This field will be wiped when a service is updated + to a non 'LoadBalancer' type. 
+ displayName: Postgres Services Coordinator Primary Load Balancer Class + path: postgresServices.coordinator.primary.loadBalancerClass + - description: 'Only applies to Service Type: LoadBalancer. This feature + depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. This field will + be ignored if the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. Users are + encouraged to use implementation-specific annotations when available.' + displayName: Postgres Services Coordinator Primary Load Balancer IP + path: postgresServices.coordinator.primary.loadBalancerIP + - displayName: Postgres Services Coordinator Primary Load Balancer Source + Ranges + path: postgresServices.coordinator.primary.loadBalancerSourceRanges + - description: publishNotReadyAddresses indicates that any agent which deals + with endpoints for this Service should disregard any indications of + ready/not-ready. The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes controllers that + generate Endpoints and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" even if the Pods + themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this + behavior. + displayName: Postgres Services Coordinator Primary Publish Not Ready Addresses + path: postgresServices.coordinator.primary.publishNotReadyAddresses + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Supports "ClientIP" and "None". Used to maintain session + affinity. Enable client IP based session affinity. Must be ClientIP + or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + displayName: Postgres Services Coordinator Primary Session Affinity + path: postgresServices.coordinator.primary.sessionAffinity + - description: timeoutSeconds specifies the seconds of ClientIP type session + sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + displayName: Postgres Services Coordinator Primary Session Affinity Config + Client IP Timeout Seconds + path: postgresServices.coordinator.primary.sessionAffinityConfig.clientIP.timeoutSeconds + - description: the node port that will be exposed to connect to Postgres + instance + displayName: Postgres Services Coordinator Primary Node Ports Pgport + path: postgresServices.coordinator.primary.nodePorts.pgport + - description: the node port that will be exposed to connect to Postgres + instance for replication purpose + displayName: Postgres Services Coordinator Primary Node Ports Replicationport + path: postgresServices.coordinator.primary.nodePorts.replicationport + - description: "The application protocol for this port. This is used as\ + \ a hint for implementations to offer richer behavior for protocols\ + \ that they understand. This field follows standard Kubernetes label\ + \ syntax. 
Valid values are either:\n\n* Un-prefixed protocol names -\ + \ reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\ + \n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2\ + \ prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n\ + \ * 'kubernetes.io/ws' - WebSocket over cleartext as described in\ + \ https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' -\ + \ WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \n* Other protocols should use implementation-defined prefixed names\ + \ such as mycompany.com/my-custom-protocol." + displayName: Postgres Services Coordinator Custom Ports App Protocol + path: postgresServices.coordinator.customPorts.appProtocol + - description: The name of this port within the service. This must be a + DNS_LABEL. All ports within a ServiceSpec must have unique names. When + considering the endpoints for a Service, this must match the 'name' + field in the EndpointPort. Optional if only one ServicePort is defined + on this service. + displayName: Postgres Services Coordinator Custom Ports Name + path: postgresServices.coordinator.customPorts.name + - description: 'The port on each node on which this service is exposed when + type is NodePort or LoadBalancer. Usually assigned by the system. If + a value is specified, in-range, and not in use it will be used, otherwise + the operation will fail. If not specified, a port will be allocated + if this Service requires one. If this field is specified when creating + a Service which does not need it, creation will fail. This field will + be wiped when updating a Service to no longer need it (e.g. changing + type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + displayName: Postgres Services Coordinator Custom Ports Node Port + path: postgresServices.coordinator.customPorts.nodePort + - description: The port that will be exposed by this service. + displayName: Postgres Services Coordinator Custom Ports Port + path: postgresServices.coordinator.customPorts.port + - description: The IP protocol for this port. Supports "TCP", "UDP", and + "SCTP". Default is TCP. + displayName: Postgres Services Coordinator Custom Ports Protocol + path: postgresServices.coordinator.customPorts.protocol + - description: Specify if the service should be created or not. + displayName: Postgres Services Shards Primaries Enabled + path: postgresServices.shards.primaries.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'type determines how the Service is exposed. Defaults to + ClusterIP. Valid + + options are ClusterIP, NodePort, and LoadBalancer. "ClusterIP" allocates + + a cluster-internal IP address for load-balancing to endpoints. + + "NodePort" builds on ClusterIP and allocates a port on every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current cloud). + + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + displayName: Postgres Services Shards Primaries Type + path: postgresServices.shards.primaries.type + - description: allocateLoadBalancerNodePorts defines if NodePorts will be + automatically allocated for services with type LoadBalancer. Default + is "true". 
It may be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific NodePorts (by + specifying a value), those requests will be respected, regardless of + this field. This field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any other type. + displayName: Postgres Services Shards Primaries Allocate Load Balancer + Node Ports + path: postgresServices.shards.primaries.allocateLoadBalancerNodePorts + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Postgres Services Shards Primaries External I Ps + path: postgresServices.shards.primaries.externalIPs + - description: externalTrafficPolicy describes how nodes distribute service + traffic they receive on one of the Service's "externally-facing" addresses + (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to "Local", the + proxy will configure the service in a way that assumes that external + load balancers will take care of balancing the service traffic between + nodes, and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client source IP. + (Traffic mistakenly sent to a node with no endpoints will be dropped.) + The default value, "Cluster", uses the standard behavior of routing + to all endpoints evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer IP from within + the cluster will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to take traffic policy + into account when picking a node. + displayName: Postgres Services Shards Primaries External Traffic Policy + path: postgresServices.shards.primaries.externalTrafficPolicy + - description: healthCheckNodePort specifies the healthcheck nodePort for + the service. This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, a value will + be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). This + field cannot be updated once set. + displayName: Postgres Services Shards Primaries Health Check Node Port + path: postgresServices.shards.primaries.healthCheckNodePort + - description: InternalTrafficPolicy describes how nodes distribute service + traffic they receive on the ClusterIP. If set to "Local", the proxy + will assume that pods only want to talk to endpoints of the service + on the same node as the pod, dropping the traffic if there are no local + endpoints. The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified by topology and + other features). + displayName: Postgres Services Shards Primaries Internal Traffic Policy + path: postgresServices.shards.primaries.internalTrafficPolicy + - displayName: Postgres Services Shards Primaries Ip Families + path: postgresServices.shards.primaries.ipFamilies + - description: IPFamilyPolicy represents the dual-stack-ness requested or + required by this Service. If there is no value provided, then this field + will be set to SingleStack. 
Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on dual-stack configured + clusters or a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). + The ipFamilies and clusterIPs fields depend on the value of this field. + This field will be wiped when updating a service to type ExternalName. + displayName: Postgres Services Shards Primaries Ip Family Policy + path: postgresServices.shards.primaries.ipFamilyPolicy + - description: loadBalancerClass is the class of the load balancer implementation + this Service belongs to. If specified, the value of this field must + be a label-style identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is used, today + this is typically done through the cloud provider integration, but should + apply for any default implementation. If set, it is assumed that a load + balancer implementation is watching for Services with a matching class. + Any default load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only be set when + creating or updating a Service to type 'LoadBalancer'. Once set, it + can not be changed. This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + displayName: Postgres Services Shards Primaries Load Balancer Class + path: postgresServices.shards.primaries.loadBalancerClass + - description: 'Only applies to Service Type: LoadBalancer. This feature + depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. This field will + be ignored if the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. Users are + encouraged to use implementation-specific annotations when available.' + displayName: Postgres Services Shards Primaries Load Balancer IP + path: postgresServices.shards.primaries.loadBalancerIP + - displayName: Postgres Services Shards Primaries Load Balancer Source Ranges + path: postgresServices.shards.primaries.loadBalancerSourceRanges + - description: publishNotReadyAddresses indicates that any agent which deals + with endpoints for this Service should disregard any indications of + ready/not-ready. The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes controllers that + generate Endpoints and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" even if the Pods + themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this + behavior. + displayName: Postgres Services Shards Primaries Publish Not Ready Addresses + path: postgresServices.shards.primaries.publishNotReadyAddresses + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Supports "ClientIP" and "None". Used to maintain session + affinity. Enable client IP based session affinity. Must be ClientIP + or None. Defaults to None. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + displayName: Postgres Services Shards Primaries Session Affinity + path: postgresServices.shards.primaries.sessionAffinity + - description: timeoutSeconds specifies the seconds of ClientIP type session + sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + displayName: Postgres Services Shards Primaries Session Affinity Config + Client IP Timeout Seconds + path: postgresServices.shards.primaries.sessionAffinityConfig.clientIP.timeoutSeconds + - description: the node port that will be exposed to connect to Postgres + instance + displayName: Postgres Services Shards Primaries Node Ports Pgport + path: postgresServices.shards.primaries.nodePorts.pgport + - description: the node port that will be exposed to connect to Postgres + instance for replication purpose + displayName: Postgres Services Shards Primaries Node Ports Replicationport + path: postgresServices.shards.primaries.nodePorts.replicationport + - description: "The application protocol for this port. This is used as\ + \ a hint for implementations to offer richer behavior for protocols\ + \ that they understand. This field follows standard Kubernetes label\ + \ syntax. Valid values are either:\n\n* Un-prefixed protocol names -\ + \ reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\ + \n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2\ + \ prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n\ + \ * 'kubernetes.io/ws' - WebSocket over cleartext as described in\ + \ https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' -\ + \ WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \n* Other protocols should use implementation-defined prefixed names\ + \ such as mycompany.com/my-custom-protocol." + displayName: Postgres Services Shards Custom Ports App Protocol + path: postgresServices.shards.customPorts.appProtocol + - description: The name of this port within the service. This must be a + DNS_LABEL. All ports within a ServiceSpec must have unique names. When + considering the endpoints for a Service, this must match the 'name' + field in the EndpointPort. Optional if only one ServicePort is defined + on this service. + displayName: Postgres Services Shards Custom Ports Name + path: postgresServices.shards.customPorts.name + - description: 'The port on each node on which this service is exposed when + type is NodePort or LoadBalancer. Usually assigned by the system. If + a value is specified, in-range, and not in use it will be used, otherwise + the operation will fail. If not specified, a port will be allocated + if this Service requires one. If this field is specified when creating + a Service which does not need it, creation will fail. This field will + be wiped when updating a Service to no longer need it (e.g. changing + type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + displayName: Postgres Services Shards Custom Ports Node Port + path: postgresServices.shards.customPorts.nodePort + - description: The port that will be exposed by this service. 
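# --- Illustrative example (editorial note, not part of the generated manifest) ---
# A customPorts fragment (standard Kubernetes ServicePort fields) for the shards
# services, based on the descriptor paths above. The extra port is a placeholder.
#
# spec:
#   postgresServices:
#     shards:
#       customPorts:
#         - name: metrics               # must be a DNS_LABEL, unique within the Service
#           port: 9187                  # hypothetical exporter port
#           protocol: TCP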
+ displayName: Postgres Services Shards Custom Ports Port + path: postgresServices.shards.customPorts.port + - description: The IP protocol for this port. Supports "TCP", "UDP", and + "SCTP". Default is TCP. + displayName: Postgres Services Shards Custom Ports Protocol + path: postgresServices.shards.customPorts.protocol + - description: 'If set to `true`, avoids creating the Prometheus exporter + sidecar. Recommended when there''s no intention to use internal monitoring. + + + **Changing this field may require a restart.** + + ' + displayName: Configurations Observability Disable Metrics + path: configurations.observability.disableMetrics + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Indicate the receiver in the configuration for the collector + scraper (if not specified, will default to prometheus). + displayName: Configurations Observability Receiver + path: configurations.observability.receiver + - description: If set to `true`, a PodMonitor is created for each Prometheus + instance as specified in the SGConfig.spec.collector.prometheusOperator.monitors + section. + displayName: Configurations Observability Prometheus Autobind + path: configurations.observability.prometheusAutobind + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Specifies the backup compression algorithm. Possible options + are: lz4, lzma, brotli. The default method is `lz4`. LZ4 is the fastest + method, but compression ratio is the worst. LZMA is way slower, but + it compresses backups about 6 times better than LZ4. Brotli is a good + trade-off between speed and compression ratio, being about 3 times better + than LZ4. + + ' + displayName: Configurations Backups Compression + path: configurations.backups.compression + - description: 'Continuous Archiving backups are composed of periodic *base + backups* and all the WAL segments produced in between those base backups + for the coordinator and each shard. This parameter specifies at what + time and with what frequency to start performing a new base backup. + + + Use cron syntax (`m h dom mon dow`) for this parameter, i.e., 5 values + separated by spaces: + + * `m`: minute, 0 to 59. + + * `h`: hour, 0 to 23. + + * `dom`: day of month, 1 to 31 (recommended not to set it higher than + 28). + + * `mon`: month, 1 to 12. + + * `dow`: day of week, 0 to 7 (0 and 7 both represent Sunday). + + + Also ranges of values (`start-end`), the symbol `*` (meaning `first-last`) + or even `*/N`, where `N` is a number, meaning ""every `N`, may be used. + All times are UTC. It is recommended to avoid 00:00 as base backup time, + to avoid overlapping with any other external operations happening at + this time. + + + If not set, full backups are never performed automatically. + + ' + displayName: Configurations Backups Cron Schedule + path: configurations.backups.cronSchedule + - description: 'Maximum storage upload bandwidth used when storing a backup. + In bytes (per second). + + ' + displayName: Configurations Backups Performance Max Network Bandwidth + path: configurations.backups.performance.maxNetworkBandwidth + - description: 'Maximum disk read I/O when performing a backup. In bytes + (per second). + + ' + displayName: Configurations Backups Performance Max Disk Bandwidth + path: configurations.backups.performance.maxDiskBandwidth + - description: 'Backup storage may use several concurrent streams to store + the data. This parameter configures the number of parallel streams to + use to reading from disk. 
By default, it''s set to 1. + + ' + displayName: Configurations Backups Performance Upload Disk Concurrency + path: configurations.backups.performance.uploadDiskConcurrency + - description: 'Backup storage may use several concurrent streams to store + the data. This parameter configures the number of parallel streams to + use. By default, it''s set to 16. + + ' + displayName: Configurations Backups Performance Upload Concurrency + path: configurations.backups.performance.uploadConcurrency + - description: 'Backup storage may use several concurrent streams to read + the data. This parameter configures the number of parallel streams to + use. By default, it''s set to the minimum between the number of file + to read and 10. + + ' + displayName: Configurations Backups Performance Download Concurrency + path: configurations.backups.performance.downloadConcurrency + - description: 'When an automatic retention policy is defined to delete + old base backups, this parameter specifies the number of base backups + to keep, in a sliding window. + + + Consequently, the time range covered by backups is `periodicity*retention`, + where `periodicity` is the separation between backups as specified by + the `cronSchedule` property. + + + Default is 5. + + ' + displayName: Configurations Backups Retention + path: configurations.backups.retention + - description: 'Name of the [SGObjectStorage](https://stackgres.io/doc/latest/reference/crd/sgobjectstorage) + to use for the cluster. It defines the location in which the the backups + will be stored. + + ' + displayName: Configurations Backups SGObjectStorage + path: configurations.backups.sgObjectStorage + - displayName: Configurations Backups Paths + path: configurations.backups.paths + - description: 'If specified SGBackup will use VolumeSnapshot to create + backups. + + + This functionality still require to store WAL files in an SGObjectStorage + but could result in much faster backups and restore of those backups. + + + See also https://kubernetes.io/docs/concepts/storage/volume-snapshots/ + + ' + displayName: Configurations Backups Use Volume Snapshot + path: configurations.backups.useVolumeSnapshot + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The name of the VolumeSnaphostClass to use to create the + VolumeSnapshot for backups. + + + See also https://kubernetes.io/docs/concepts/storage/volume-snapshots/ + + ' + displayName: Configurations Backups Volume Snapshot Class + path: configurations.backups.volumeSnapshotClass + - description: 'If specified SGBackup will create a backup forcing a fast + start (by setting parameter `fast` to `true` when calling `pg_backup_start`) + that will reduce the time the backups may take at the expense of more + IO usage. + + + See also https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-LOWLEVEL-BASE-BACKUP + + ' + displayName: Configurations Backups Fast Volume Snapshot + path: configurations.backups.fastVolumeSnapshot + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Allow to set a timeout for the backup creation. + + + If not set it will be disabled and the backup operation will continue + until the backup completes or fail. If set to 0 is the same as not being + set. + + + Make sure to set a reasonable high value in order to allow for any unexpected + delays during backup creation (network low bandwidth, disk low throughput + and so forth). 
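# --- Illustrative example (editorial note, not part of the generated manifest) ---
# A configurations.backups fragment based on the descriptor paths above. The
# SGObjectStorage name and schedule are placeholders; backups is sketched here as a
# list entry, which should be verified against the CRD schema for this release.
#
# spec:
#   configurations:
#     backups:
#       - sgObjectStorage: backup-storage   # hypothetical SGObjectStorage name
#         cronSchedule: '30 2 * * *'        # daily base backup at 02:30 UTC
#         compression: lz4                  # lz4 | lzma | brotli
#         retention: 5                      # sliding window of base backups to keep
#         performance:
#           uploadConcurrency: 16           # default shown in the description above
#           maxNetworkBandwidth: 104857600  # bytes per second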
+ + ' + displayName: Configurations Backups Timeout + path: configurations.backups.timeout + - description: "Allows setting a timeout for the reconciliation process that\ \ takes place after the backup.\n\nIf not set, defaults to 300 (5 minutes).\ \ If set to 0 it will disable the timeout.\n\nFailure of reconciliation\ \ will not make the backup fail and will be re-tried the next time an\ \ SGBackup\n or scheduled backup Job takes place.\n" + displayName: Configurations Backups Reconciliation Timeout + path: configurations.backups.reconciliationTimeout + - description: 'If specified, WAL created after any unmanaged lifecycle + backups will be retained. + + ' + displayName: Configurations Backups Retain Wals For Unmanaged Lifecycle + path: configurations.backups.retainWalsForUnmanagedLifecycle + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Patroni Rest Api Password Name + path: configurations.credentials.patroni.restApiPassword.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Patroni Rest Api Password Key + path: configurations.credentials.patroni.restApiPassword.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Superuser Username Name + path: configurations.credentials.users.superuser.username.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Superuser Username Key + path: configurations.credentials.users.superuser.username.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Superuser Password Name + path: configurations.credentials.users.superuser.password.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Superuser Password Key + path: configurations.credentials.users.superuser.password.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Replication Username Name + path: configurations.credentials.users.replication.username.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Replication Username Key + path: configurations.credentials.users.replication.username.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Replication Password Name + path: configurations.credentials.users.replication.password.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Replication Password Key + path: configurations.credentials.users.replication.password.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names).
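+ # Editor's note (illustrative sketch only, not part of the generated descriptors):
+ # the credential fields above follow the usual Kubernetes Secret key-selector
+ # shape (a Secret name plus a key inside it). Assuming a hypothetical Secret
+ # named "my-cluster-credentials", an SGShardedCluster spec fragment might look like:
+ #
+ #   configurations:
+ #     credentials:
+ #       patroni:
+ #         restApiPassword:
+ #           name: my-cluster-credentials   # hypothetical Secret name
+ #           key: restapi-password          # hypothetical key within that Secret
+ #       users:
+ #         superuser:
+ #           password:
+ #             name: my-cluster-credentials
+ #             key: superuser-password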
+ displayName: Configurations Credentials Users Authenticator Username Name + path: configurations.credentials.users.authenticator.username.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Authenticator Username Key + path: configurations.credentials.users.authenticator.username.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Configurations Credentials Users Authenticator Password Name + path: configurations.credentials.users.authenticator.password.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Configurations Credentials Users Authenticator Password Key + path: configurations.credentials.users.authenticator.password.key + - description: It's the reference of custom provider name. If not specified, + then the default value will be `stackgres` + displayName: Configurations Binding Provider + path: configurations.binding.provider + - description: Allow to specify the database name. If not specified, then + the default value is `postgres` + displayName: Configurations Binding Database + path: configurations.binding.database + - description: Allow to specify the username. If not specified, then the + superuser username will be used. + displayName: Configurations Binding Username + path: configurations.binding.username + - description: The name of the Secret + displayName: Configurations Binding Password Name + path: configurations.binding.password.name + - description: The key of the Secret + displayName: Configurations Binding Password Key + path: configurations.binding.password.key + - displayName: Metadata Annotations All Resources + path: metadata.annotations.allResources + - displayName: Metadata Annotations Cluster Pods + path: metadata.annotations.clusterPods + - displayName: Metadata Annotations Services + path: metadata.annotations.services + - displayName: Metadata Annotations Primary Service + path: metadata.annotations.primaryService + - displayName: Metadata Annotations Replicas Service + path: metadata.annotations.replicasService + - displayName: Metadata Labels Cluster Pods + path: metadata.labels.clusterPods + - displayName: Metadata Labels Services + path: metadata.labels.services + - description: "Number of StackGres instances for the cluster. Each instance\ + \ contains one Postgres server.\n Out of all of the Postgres servers,\ + \ one is elected as the primary, the rest remain as read-only replicas.\n\ + \nIf sharding type is `shardingsphere` then, instead of an SGCluster\ + \ a ComputeNode will be created.\n\nSee also https://shardingsphere.apache.org/oncloud/current/en/user-manual/cn-sn-operator/#computenode\ + \ \n" + displayName: Coordinator Instances + path: coordinator.instances + - description: 'Allow to enable or disable any of horizontal and vertical + Pod autoscaling. 
+ + + Possible values are: + + * `vertical`: only vertical Pod autoscaling will be enabled (default) + + * `none`: all autoscaling will be disabled + + ' + displayName: Coordinator Autoscaling Mode + path: coordinator.autoscaling.mode + - description: The minimum allowed CPU for the patroni container + displayName: Coordinator Autoscaling Min Allowed Patroni Cpu + path: coordinator.autoscaling.minAllowed.patroni.cpu + - description: The minimum allowed memory for the patroni container + displayName: Coordinator Autoscaling Min Allowed Patroni Memory + path: coordinator.autoscaling.minAllowed.patroni.memory + - description: The minimum allowed CPU for the pgbouncer container + displayName: Coordinator Autoscaling Min Allowed Pgbouncer Cpu + path: coordinator.autoscaling.minAllowed.pgbouncer.cpu + - description: The minimum allowed memory for the pgbouncer container + displayName: Coordinator Autoscaling Min Allowed Pgbouncer Memory + path: coordinator.autoscaling.minAllowed.pgbouncer.memory + - description: The minimum allowed CPU for the envoy container + displayName: Coordinator Autoscaling Min Allowed Envoy Cpu + path: coordinator.autoscaling.minAllowed.envoy.cpu + - description: The minimum allowed memory for the envoy container + displayName: Coordinator Autoscaling Min Allowed Envoy Memory + path: coordinator.autoscaling.minAllowed.envoy.memory + - description: The maximum allowed CPU for the patroni container + displayName: Coordinator Autoscaling Max Allowed Patroni Cpu + path: coordinator.autoscaling.maxAllowed.patroni.cpu + - description: The maximum allowed memory for the patroni container + displayName: Coordinator Autoscaling Max Allowed Patroni Memory + path: coordinator.autoscaling.maxAllowed.patroni.memory + - description: The maximum allowed CPU for the pgbouncer container + displayName: Coordinator Autoscaling Max Allowed Pgbouncer Cpu + path: coordinator.autoscaling.maxAllowed.pgbouncer.cpu + - description: The maximum allowed memory for the pgbouncer container + displayName: Coordinator Autoscaling Max Allowed Pgbouncer Memory + path: coordinator.autoscaling.maxAllowed.pgbouncer.memory + - description: The maximum allowed CPU for the envoy container + displayName: Coordinator Autoscaling Max Allowed Envoy Cpu + path: coordinator.autoscaling.maxAllowed.envoy.cpu + - description: The maximum allowed memory for the envoy container + displayName: Coordinator Autoscaling Max Allowed Envoy Memory + path: coordinator.autoscaling.maxAllowed.envoy.memory + - description: 'The target value for replicas connections used in order + to trigger the upscale of replica instances. + + ' + displayName: Coordinator Autoscaling Horizontal Eplicas Connections Usage + Target + path: coordinator.autoscaling.horizontal.eplicasConnectionsUsageTarget + - description: 'The metric type for connections used metric. See https://keda.sh/docs/latest/concepts/scaling-deployments/#triggers + + ' + displayName: Coordinator Autoscaling Horizontal Replicas Connections Usage + Metric Type + path: coordinator.autoscaling.horizontal.replicasConnectionsUsageMetricType + - description: 'The period in seconds before the downscale of replica instances + can be triggered. + + ' + displayName: Coordinator Autoscaling Horizontal Cooldown Period + path: coordinator.autoscaling.horizontal.cooldownPeriod + - description: 'The interval in seconds to check if the scaleup or scaledown + have to be triggered. 
+ + ' + displayName: Coordinator Autoscaling Horizontal Polling Interval + path: coordinator.autoscaling.horizontal.pollingInterval + - description: 'Recommender responsible for generating recommendation for + vertical Pod autoscaling. If not specified the default one will be used. + + ' + displayName: Coordinator Autoscaling Vertical Recommender + path: coordinator.autoscaling.vertical.recommender + - description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/reference/crd/sginstanceprofile/). + + + A SGInstanceProfile defines CPU and memory limits. Must exist before + creating a cluster. + + + When no profile is set, a default (1 core, 2 GiB RAM) one is used. + + + **Changing this field may require a restart.** + + ' + displayName: Coordinator SGInstanceProfile + path: coordinator.sgInstanceProfile + - description: If true, when any entry of any `SGScript` fail will not prevent + subsequent `SGScript` from being executed. By default is `false`. + displayName: Coordinator Managed Sql Continue On SG Script Error + path: coordinator.managedSql.continueOnSGScriptError + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The id is immutable and must be unique across all the `SGScript` + entries. It is replaced by the operator and is used to identify the + `SGScript` entry. + displayName: Coordinator Managed Sql Scripts Id + path: coordinator.managedSql.scripts.id + - description: A reference to an `SGScript` + displayName: Coordinator Managed Sql Scripts SGScript + path: coordinator.managedSql.scripts.sgScript + - description: 'Size of the PersistentVolume set for each instance of the + cluster. This size is specified either in Mebibytes, Gibibytes or Tebibytes + (multiples of 2^20, 2^30 or 2^40, respectively). + + + If sharding type is `shardingsphere` then this field is ignored. + + ' + displayName: Coordinator Pods Persistent Volume Size + path: coordinator.pods.persistentVolume.size + - description: 'Name of an existing StorageClass in the Kubernetes cluster, + used to create the PersistentVolumes for the instances of the cluster. + + + If sharding type is `shardingsphere` then this field is ignored. + + ' + displayName: Coordinator Pods Persistent Volume Storage Class + path: coordinator.pods.persistentVolume.storageClass + - description: 'If set to `true`, avoids creating a connection pooling (using + [PgBouncer](https://www.pgbouncer.org/)) sidecar. + + + If sharding type is `shardingsphere` then this field is ignored. + + + **Changing this field may require a restart.** + + ' + displayName: Coordinator Pods Disable Connection Pooling + path: coordinator.pods.disableConnectionPooling + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: '**Deprecated** use instead .spec.configurations.observability.disableMetrics. + + ' + displayName: Coordinator Pods Disable Metrics Exporter + path: coordinator.pods.disableMetricsExporter + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If set to `true`, avoids creating the `postgres-util` sidecar. + This sidecar contains usual Postgres administration utilities *that + are not present in the main (`patroni`) container*, like `psql`. Only + disable if you know what you are doing. + + + If sharding type is `shardingsphere` then this field is ignored. 
+ + + **Changing this field may require a restart.** + + ' + displayName: Coordinator Pods Disable Postgres Util + path: coordinator.pods.disablePostgresUtil + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If set to `true`, avoids creating the `envoy` sidecar. This + sidecar is used as the edge proxy for the cluster''s Pods providing + extra metrics to the monitoring layer. + + + **Changing this field may require a restart.** + + ' + displayName: Coordinator Pods Disable Envoy + path: coordinator.pods.disableEnvoy + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When enabled, resource limits for containers other than the + patroni container will be set just like for the patroni container, as specified + in the SGInstanceProfile. + + + **Changing this field may require a restart.** + + ' + displayName: Coordinator Pods Resources Enable Cluster Limits Requirements + path: coordinator.pods.resources.enableClusterLimitsRequirements + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "When set to `true` the resources requests values in fields\ \ `SGInstanceProfile.spec.requests.cpu` and `SGInstanceProfile.spec.requests.memory`\ \ will represent the resources\n requests of the patroni container and\ \ the total resources requests calculated by adding the resources requests\ \ of all the containers (including the patroni container).\n\n**Changing\ \ this field may require a restart.**\n" + displayName: Coordinator Pods Resources Disable Resources Requests Split + From Total + path: coordinator.pods.resources.disableResourcesRequestsSplitFromTotal + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "When set to `true` the reconciliation of the cluster will\ \ fail if `disableResourcesRequestsSplitFromTotal` is not set or set\ \ to `false` and the sum of the CPU or memory\n of all the containers\ \ except patroni is equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\ \ or `SGInstanceProfile.spec.requests.memory`.\n\nWhen `false` (the\ \ default) and `disableResourcesRequestsSplitFromTotal` is not set or\ \ set to `false` and the sum of the CPU or memory\n of all the containers\ \ except patroni is equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\ \ or `SGInstanceProfile.spec.requests.memory`\n then the patroni container\ \ resources will be set to 0.\n" + displayName: Coordinator Pods Resources Fail When Total Is Higher + path: coordinator.pods.resources.failWhenTotalIsHigher + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Coordinator Pods Scheduling Node Selector + path: coordinator.pods.scheduling.nodeSelector + - description: The pod this Toleration is attached to tolerates any taint + that matches the triple <key,value,effect> using the matching operator + <operator>. + displayName: Coordinator Pods Scheduling Tolerations + path: coordinator.pods.scheduling.tolerations + - description: 'Node affinity is a group of node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + displayName: Coordinator Pods Scheduling Node Affinity + path: coordinator.pods.scheduling.nodeAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:nodeAffinity + - description: If specified, indicates the pod's priority.
"system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Coordinator Pods Scheduling Priority Class Name + path: coordinator.pods.scheduling.priorityClassName + - description: 'Pod affinity is a group of inter pod affinity scheduling + rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + displayName: Coordinator Pods Scheduling Pod Affinity + path: coordinator.pods.scheduling.podAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAffinity + - description: 'Pod anti affinity is a group of inter pod anti affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + displayName: Coordinator Pods Scheduling Pod Anti Affinity + path: coordinator.pods.scheduling.podAntiAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity + - description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + displayName: Coordinator Pods Scheduling Topology Spread Constraints + path: coordinator.pods.scheduling.topologySpreadConstraints + - description: The label key that the selector applies to. + displayName: Coordinator Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Key + path: coordinator.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Coordinator Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Operator + path: coordinator.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.operator + - displayName: Coordinator Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Values + path: coordinator.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Coordinator Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Fields Key + path: coordinator.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ displayName: Coordinator Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Fields Operator + path: coordinator.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.operator + - displayName: Coordinator Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Fields Values + path: coordinator.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.values + - description: Weight associated with matching the corresponding nodeSelectorTerm, + in the range 1-100. + displayName: Coordinator Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Weight + path: coordinator.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: The label key that the selector applies to. + displayName: Coordinator Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Key + path: coordinator.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Coordinator Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Operator + path: coordinator.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.operator + - displayName: Coordinator Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Values + path: coordinator.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Coordinator Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Key + path: coordinator.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Coordinator Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Operator + path: coordinator.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.operator + - displayName: Coordinator Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Values + path: coordinator.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.values + - description: 'Node affinity is a group of node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + displayName: Coordinator Pods Scheduling Backup Tolerations + path: coordinator.pods.scheduling.backup.tolerations + - description: 'Node affinity is a group of node affinity scheduling rules. 
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + displayName: Coordinator Pods Scheduling Backup Node Affinity + path: coordinator.pods.scheduling.backup.nodeAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:nodeAffinity + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Coordinator Pods Scheduling Backup Priority Class Name + path: coordinator.pods.scheduling.backup.priorityClassName + - description: 'Pod affinity is a group of inter pod affinity scheduling + rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + displayName: Coordinator Pods Scheduling Backup Pod Affinity + path: coordinator.pods.scheduling.backup.podAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAffinity + - description: 'Pod anti affinity is a group of inter pod anti affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + displayName: Coordinator Pods Scheduling Backup Pod Anti Affinity + path: coordinator.pods.scheduling.backup.podAntiAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity + - description: "managementPolicy controls how pods are created during initial\ + \ scale up, when replacing pods\n on nodes, or when scaling down. The\ + \ default policy is `OrderedReady`, where pods are created\n in increasing\ + \ order (pod-0, then pod-1, etc) and the controller will wait until\ + \ each pod is\n ready before continuing. When scaling down, the pods\ + \ are removed in the opposite order.\n The alternative policy is `Parallel`\ + \ which will create pods in parallel to match the desired\n scale without\ + \ waiting, and on scale down will delete all pods at once.\n\nIf sharding\ + \ type is `shardingsphere` then this field is ignored.\n" + displayName: Coordinator Pods Management Policy + path: coordinator.pods.managementPolicy + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Coordinator Pods Custom Volume Mounts Mount Path + path: coordinator.pods.customVolumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Coordinator Pods Custom Volume Mounts Mount Propagation + path: coordinator.pods.customVolumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Coordinator Pods Custom Volume Mounts Name + path: coordinator.pods.customVolumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. + displayName: Coordinator Pods Custom Volume Mounts Read Only + path: coordinator.pods.customVolumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). 
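+ # Editor's note (illustrative sketch only): the customVolumeMounts fields above
+ # mirror the standard Kubernetes VolumeMount shape, so a hypothetical entry that
+ # mounts a volume named "extra-scripts" read-only could look like:
+ #
+ #   coordinator:
+ #     pods:
+ #       customVolumeMounts:
+ #         - name: extra-scripts      # must match the name of a declared volume
+ #           mountPath: /opt/extra    # path inside the container; must not contain ':'
+ #           readOnly: true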
+ displayName: Coordinator Pods Custom Volume Mounts Sub Path + path: coordinator.pods.customVolumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + displayName: Coordinator Pods Custom Volume Mounts Sub Path Expr + path: coordinator.pods.customVolumeMounts.subPathExpr + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Coordinator Pods Custom Init Volume Mounts Mount Path + path: coordinator.pods.customInitVolumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Coordinator Pods Custom Init Volume Mounts Mount Propagation + path: coordinator.pods.customInitVolumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Coordinator Pods Custom Init Volume Mounts Name + path: coordinator.pods.customInitVolumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. + displayName: Coordinator Pods Custom Init Volume Mounts Read Only + path: coordinator.pods.customInitVolumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + displayName: Coordinator Pods Custom Init Volume Mounts Sub Path + path: coordinator.pods.customInitVolumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + displayName: Coordinator Pods Custom Init Volume Mounts Sub Path Expr + path: coordinator.pods.customInitVolumeMounts.subPathExpr + - description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the cluster. It must exist. When not set, a default Postgres + config, for the major version selected, is used. + + + If sharding type is `shardingsphere` then this field is ignored. + + + **Changing this field may require a restart.** + + ' + displayName: Coordinator Configurations SGPostgresConfig + path: coordinator.configurations.sgPostgresConfig + - description: 'Name of the [SGPoolingConfig](https://stackgres.io/doc/latest/reference/crd/sgpoolconfig) + used for this cluster. Each pod contains a sidecar with a connection + pooler (currently: [PgBouncer](https://www.pgbouncer.org/)). The connection + pooler is implemented as a sidecar. + + + If not set, a default configuration will be used. Disabling connection + pooling altogether is possible if the disableConnectionPooling property + of the pods object is set to true. + + + If sharding type is `shardingsphere` then this field is ignored. + + + **Changing this field may require a restart.** + + ' + displayName: Coordinator Configurations SGPoolingConfig + path: coordinator.configurations.sgPoolingConfig + - description: The version of the ShardingSphere Proxy. 
If not specified, + the latest available version will be used. + displayName: Coordinator Configurations Sharding Sphere Version + path: coordinator.configurations.shardingSphere.version + - description: 'Allows configuring the ShardingSphere Proxy mode type. + Options available are: + + + * `Standalone` + + * `Cluster` + + + When `Standalone`, only 1 coordinator instance may be set. + + ' + displayName: Coordinator Configurations Sharding Sphere Mode Type + path: coordinator.configurations.shardingSphere.mode.type + - description: 'Allows configuring the ShardingSphere Proxy repository + type. Options available are: + + + * `Memory` + + * `ZooKeeper` + + * `Etcd` + + + When `mode.type` is `Standalone` then `repository.type` must be `Memory`. + + When `mode.type` is `Cluster` then `repository.type` can be either + `ZooKeeper` or `Etcd`. + + ' + displayName: Coordinator Configurations Sharding Sphere Mode Repository + Type + path: coordinator.configurations.shardingSphere.mode.repository.type + - description: ZooKeeper server to connect to. + displayName: Coordinator Configurations Sharding Sphere Mode Repository + Zoo Keeper Server List + path: coordinator.configurations.shardingSphere.mode.repository.zooKeeper.serverList + - description: Etcd server to connect to. + displayName: Coordinator Configurations Sharding Sphere Mode Repository + Etcd Server List + path: coordinator.configurations.shardingSphere.mode.repository.etcd.serverList + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Coordinator Configurations Sharding Sphere Authority Users + User Name + path: coordinator.configurations.shardingSphere.authority.users.user.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Coordinator Configurations Sharding Sphere Authority Users + User Key + path: coordinator.configurations.shardingSphere.authority.users.user.key + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Coordinator Configurations Sharding Sphere Authority Users + Password Name + path: coordinator.configurations.shardingSphere.authority.users.password.name + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Coordinator Configurations Sharding Sphere Authority Users + Password Key + path: coordinator.configurations.shardingSphere.authority.users.password.key + - description: 'Allows configuring the ShardingSphere Proxy authority privilege + type. + + + See also https://shardingsphere.apache.org/document/current/en/user-manual/shardingsphere-proxy/yaml-config/authority/#authorization-configuration + + ' + displayName: Coordinator Configurations Sharding Sphere Authority Privilege + Type + path: coordinator.configurations.shardingSphere.authority.privilege.type + - description: 'Allows configuring the mappings between users and databases.
+ + + See also https://shardingsphere.apache.org/document/current/en/user-manual/shardingsphere-proxy/yaml-config/authority/#database_permitted + + ' + displayName: Coordinator Configurations Sharding Sphere Authority Privilege + User Database Mappings + path: coordinator.configurations.shardingSphere.authority.privilege.userDatabaseMappings + - description: The namespace of the ServiceAccount used by ShardingSphere + operator + displayName: Coordinator Configurations Sharding Sphere Service Account + Namespace + path: coordinator.configurations.shardingSphere.serviceAccount.namespace + - description: The name of the ServiceAccount used by ShardingSphere operator + displayName: Coordinator Configurations Sharding Sphere Service Account + Name + path: coordinator.configurations.shardingSphere.serviceAccount.name + - description: 'The replication mode applied to the whole cluster. + + Possible values are: + + * `async` (default) + + * `sync` + + * `strict-sync` + + * `sync-all` + + * `strict-sync-all` + + + **async** + + + When in asynchronous mode the cluster is allowed to lose some committed + transactions. + + When the primary server fails or becomes unavailable for any other reason + a sufficiently healthy standby + + will automatically be promoted to primary. Any transactions that have + not been replicated to that standby + + remain in a "forked timeline" on the primary, and are effectively unrecoverable + (the data is still there, + + but recovering it requires a manual recovery effort by data recovery + specialists). + + + **sync** + + + When in synchronous mode a standby will not be promoted unless it is + certain that the standby contains all + + transactions that may have returned a successful commit status to client + (clients can change the behavior + + per transaction using PostgreSQL’s `synchronous_commit` setting. Transactions + with `synchronous_commit` + + values of `off` and `local` may be lost on fail over, but will not be + blocked by replication delays). This + + means that the system may be unavailable for writes even though some + servers are available. System + + administrators can still use manual failover commands to promote a standby + even if it results in transaction + + loss. + + + Synchronous mode does not guarantee multi node durability of commits + under all circumstances. When no suitable + + standby is available, primary server will still accept writes, but does + not guarantee their replication. When + + the primary fails in this mode no standby will be promoted. When the + host that used to be the primary comes + + back it will get promoted automatically, unless system administrator + performed a manual failover. This behavior + + makes synchronous mode usable with 2 node clusters. + + + When synchronous mode is used and a standby crashes, commits will block + until the primary is switched to standalone + + mode. Manually shutting down or restarting a standby will not cause + a commit service interruption. Standby will + + signal the primary to release itself from synchronous standby duties + before PostgreSQL shutdown is initiated. + + + **strict-sync** + + + When it is absolutely necessary to guarantee that each write is stored + durably on at least two nodes, use the strict + + synchronous mode. This mode prevents synchronous replication to be switched + off on the primary when no synchronous + + standby candidates are available. 
As a downside, the primary will not + be available for writes (unless the Postgres + + transaction explicitly turns off `synchronous_mode` parameter), blocking + all client write requests until at least one + + synchronous replica comes up. + + + **Note**: Because of the way synchronous replication is implemented + in PostgreSQL it is still possible to lose + + transactions even when using strict synchronous mode. If the PostgreSQL + backend is cancelled while waiting to acknowledge + + replication (as a result of packet cancellation due to client timeout + or backend failure) transaction changes become + + visible for other backends. Such changes are not yet replicated and + may be lost in case of standby promotion. + + + **sync-all** + + + The same as `sync` but `syncInstances` is ignored and the number of + synchronous instances is equals to the total number + + of instances less one. + + + **strict-sync-all** + + + The same as `strict-sync` but `syncInstances` is ignored and the number + of synchronous instances is equals to the total number + + of instances less one. + + ' + displayName: Coordinator Replication Mode + path: coordinator.replication.mode + - description: "Number of synchronous standby instances. Must be less than\ + \ the total number of instances. It is set to 1 by default.\n Only\ + \ setteable if mode is `sync` or `strict-sync`.\n" + displayName: Coordinator Replication Sync Instances + path: coordinator.replication.syncInstances + - description: "Allow to specify how the replicas are initialized.\n\nPossible\ + \ values are:\n\n* `FromPrimary`: When this mode is used replicas will\ + \ be always created from the primary using `pg_basebackup`.\n* `FromReplica`:\ + \ When this mode is used replicas will be created from another existing\ + \ replica using\n `pg_basebackup`. Fallsback to `FromPrimary` if there's\ + \ no replica or it fails.\n* `FromExistingBackup`: When this mode is\ + \ used replicas will be created from an existing SGBackup. If `backupNewerThan`\ + \ is set\n the SGBackup must be newer than its value. When this mode\ + \ fails to restore an SGBackup it will try with a previous one (if exists).\n\ + \ Fallsback to `FromReplica` if there's no backup left or it fails.\n\ + * `FromNewlyCreatedBackup`: When this mode is used replicas will be\ + \ created from a newly created SGBackup.\n Fallsback to `FromExistingBackup`\ + \ if `backupNewerThan` is set and exists a recent backup newer than\ + \ its value or it fails.\n" + displayName: Coordinator Replication Initialization Mode + path: coordinator.replication.initialization.mode + - description: "An ISO 8601 duration in the format `PnDTnHnMn.nS`, that\ + \ specifies how old an SGBackup have to be in order to be seleceted\n\ + \ to initialize a replica.\n\nWhen `FromExistingBackup` mode is set\ + \ this field restrict the selection of SGBackup to be used for recovery\ + \ newer than the\n specified value. \n\nWhen `FromNewlyCreatedBackup`\ + \ mode is set this field skip the creation SGBackup to be used for recovery\ + \ if one newer than\n the specified value exists. \n" + displayName: Coordinator Replication Initialization Backup Newer Than + path: coordinator.replication.initialization.backupNewerThan + - description: 'Maximum storage upload bandwidth used when storing a backup. + In bytes (per second). 
+ + ' + displayName: Coordinator Replication Initialization Backup Restore Performance + Max Network Bandwidth + path: coordinator.replication.initialization.backupRestorePerformance.maxNetworkBandwidth + - description: 'Maximum disk read I/O when performing a backup. In bytes + (per second). + + ' + displayName: Coordinator Replication Initialization Backup Restore Performance + Max Disk Bandwidth + path: coordinator.replication.initialization.backupRestorePerformance.maxDiskBandwidth + - description: 'Backup storage may use several concurrent streams to read + the data. This parameter configures the number of parallel streams to + use. By default, it''s set to the minimum between the number of file + to read and 10. + + ' + displayName: Coordinator Replication Initialization Backup Restore Performance + Download Concurrency + path: coordinator.replication.initialization.backupRestorePerformance.downloadConcurrency + - displayName: Coordinator Metadata Annotations All Resources + path: coordinator.metadata.annotations.allResources + - displayName: Coordinator Metadata Annotations Cluster Pods + path: coordinator.metadata.annotations.clusterPods + - displayName: Coordinator Metadata Annotations Services + path: coordinator.metadata.annotations.services + - displayName: Coordinator Metadata Annotations Primary Service + path: coordinator.metadata.annotations.primaryService + - displayName: Coordinator Metadata Annotations Replicas Service + path: coordinator.metadata.annotations.replicasService + - displayName: Coordinator Metadata Labels Cluster Pods + path: coordinator.metadata.labels.clusterPods + - displayName: Coordinator Metadata Labels Services + path: coordinator.metadata.labels.services + - description: 'Number of shard''s StackGres clusters + + ' + displayName: Shards Clusters + path: shards.clusters + - description: "Number of StackGres instances per shard's StackGres cluster.\ + \ Each instance contains one Postgres server.\n Out of all of the Postgres\ + \ servers, one is elected as the primary, the rest remain as read-only\ + \ replicas.\n" + displayName: Shards Instances Per Cluster + path: shards.instancesPerCluster + - description: 'Allow to enable or disable any of horizontal and vertical + Pod autoscaling. 
+ + + Possible values are: + + * `vertical`: only vertical Pod autoscaling will be enabled (default) + + * `none`: all autoscaling will be disabled + + ' + displayName: Shards Autoscaling Mode + path: shards.autoscaling.mode + - description: The minimum allowed CPU for the patroni container + displayName: Shards Autoscaling Min Allowed Patroni Cpu + path: shards.autoscaling.minAllowed.patroni.cpu + - description: The minimum allowed memory for the patroni container + displayName: Shards Autoscaling Min Allowed Patroni Memory + path: shards.autoscaling.minAllowed.patroni.memory + - description: The minimum allowed CPU for the pgbouncer container + displayName: Shards Autoscaling Min Allowed Pgbouncer Cpu + path: shards.autoscaling.minAllowed.pgbouncer.cpu + - description: The minimum allowed memory for the pgbouncer container + displayName: Shards Autoscaling Min Allowed Pgbouncer Memory + path: shards.autoscaling.minAllowed.pgbouncer.memory + - description: The minimum allowed CPU for the envoy container + displayName: Shards Autoscaling Min Allowed Envoy Cpu + path: shards.autoscaling.minAllowed.envoy.cpu + - description: The minimum allowed memory for the envoy container + displayName: Shards Autoscaling Min Allowed Envoy Memory + path: shards.autoscaling.minAllowed.envoy.memory + - description: The maximum allowed CPU for the patroni container + displayName: Shards Autoscaling Max Allowed Patroni Cpu + path: shards.autoscaling.maxAllowed.patroni.cpu + - description: The maximum allowed memory for the patroni container + displayName: Shards Autoscaling Max Allowed Patroni Memory + path: shards.autoscaling.maxAllowed.patroni.memory + - description: The maximum allowed CPU for the pgbouncer container + displayName: Shards Autoscaling Max Allowed Pgbouncer Cpu + path: shards.autoscaling.maxAllowed.pgbouncer.cpu + - description: The maximum allowed memory for the pgbouncer container + displayName: Shards Autoscaling Max Allowed Pgbouncer Memory + path: shards.autoscaling.maxAllowed.pgbouncer.memory + - description: The maximum allowed CPU for the envoy container + displayName: Shards Autoscaling Max Allowed Envoy Cpu + path: shards.autoscaling.maxAllowed.envoy.cpu + - description: The maximum allowed memory for the envoy container + displayName: Shards Autoscaling Max Allowed Envoy Memory + path: shards.autoscaling.maxAllowed.envoy.memory + - description: 'The target value for replicas connections used in order + to trigger the upscale of replica instances. + + ' + displayName: Shards Autoscaling Horizontal Eplicas Connections Usage Target + path: shards.autoscaling.horizontal.eplicasConnectionsUsageTarget + - description: 'The metric type for connections used metric. See https://keda.sh/docs/latest/concepts/scaling-deployments/#triggers + + ' + displayName: Shards Autoscaling Horizontal Replicas Connections Usage + Metric Type + path: shards.autoscaling.horizontal.replicasConnectionsUsageMetricType + - description: 'The period in seconds before the downscale of replica instances + can be triggered. + + ' + displayName: Shards Autoscaling Horizontal Cooldown Period + path: shards.autoscaling.horizontal.cooldownPeriod + - description: 'The interval in seconds to check if the scaleup or scaledown + have to be triggered. + + ' + displayName: Shards Autoscaling Horizontal Polling Interval + path: shards.autoscaling.horizontal.pollingInterval + - description: 'Recommender responsible for generating recommendation for + vertical Pod autoscaling. If not specified the default one will be used. 
+ + ' + displayName: Shards Autoscaling Vertical Recommender + path: shards.autoscaling.vertical.recommender + - description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/reference/crd/sginstanceprofile/). + + + A SGInstanceProfile defines CPU and memory limits. Must exist before + creating a cluster. + + + When no profile is set, a default (1 core, 2 GiB RAM) one is used. + + + **Changing this field may require a restart.** + + ' + displayName: Shards SGInstanceProfile + path: shards.sgInstanceProfile + - description: If true, when any entry of any `SGScript` fail will not prevent + subsequent `SGScript` from being executed. By default is `false`. + displayName: Shards Managed Sql Continue On SG Script Error + path: shards.managedSql.continueOnSGScriptError + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The id is immutable and must be unique across all the `SGScript` + entries. It is replaced by the operator and is used to identify the + `SGScript` entry. + displayName: Shards Managed Sql Scripts Id + path: shards.managedSql.scripts.id + - description: A reference to an `SGScript` + displayName: Shards Managed Sql Scripts SGScript + path: shards.managedSql.scripts.sgScript + - description: 'Size of the PersistentVolume set for each instance of the + cluster. This size is specified either in Mebibytes, Gibibytes or Tebibytes + (multiples of 2^20, 2^30 or 2^40, respectively). + + ' + displayName: Shards Pods Persistent Volume Size + path: shards.pods.persistentVolume.size + - description: 'Name of an existing StorageClass in the Kubernetes cluster, + used to create the PersistentVolumes for the instances of the cluster. + + ' + displayName: Shards Pods Persistent Volume Storage Class + path: shards.pods.persistentVolume.storageClass + - description: 'If set to `true`, avoids creating a connection pooling (using + [PgBouncer](https://www.pgbouncer.org/)) sidecar. + + + **Changing this field may require a restart.** + + ' + displayName: Shards Pods Disable Connection Pooling + path: shards.pods.disableConnectionPooling + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: '**Deprecated** use instead .spec.configurations.observability.disableMetrics. + + ' + displayName: Shards Pods Disable Metrics Exporter + path: shards.pods.disableMetricsExporter + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If set to `true`, avoids creating the `postgres-util` sidecar. + This sidecar contains usual Postgres administration utilities *that + are not present in the main (`patroni`) container*, like `psql`. Only + disable if you know what you are doing. + + + **Changing this field may require a restart.** + + ' + displayName: Shards Pods Disable Postgres Util + path: shards.pods.disablePostgresUtil + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If set to `true`, avoids creating the `envoy` sidecar. This + sidecar is used as the endge proxy for the cluster''s Pods providing + extra metrics to the monitoring layer. + + + **Changing this field may require a restart.** + + ' + displayName: Shards Pods Disable Envoy + path: shards.pods.disableEnvoy + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When enabled resource limits for containers other than the + patroni container wil be set just like for patroni contianer as specified + in the SGInstanceProfile. 
+ + + **Changing this field may require a restart.** + + ' + displayName: Shards Pods Resources Enable Cluster Limits Requirements + path: shards.pods.resources.enableClusterLimitsRequirements + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "When set to `true` the resources requests values in fields\ + \ `SGInstanceProfile.spec.requests.cpu` and `SGInstanceProfile.spec.requests.memory`\ + \ will represent the resources\n requests of the patroni container and\ + \ the total resources requests calculated by adding the resources requests\ + \ of all the containers (including the patroni container).\n\n**Changing\ + \ this field may require a restart.**\n" + displayName: Shards Pods Resources Disable Resources Requests Split From + Total + path: shards.pods.resources.disableResourcesRequestsSplitFromTotal + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "When set to `true` the reconciliation of the cluster will\ + \ fail if `disableResourcesRequestsSplitFromTotal` is not set or set\ + \ to `false` and the sum of the CPU or memory\n of all the containers\ + \ except patroni is equals or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\ + \ or `SGInstanceProfile.spec.requests.memory`.\n\nWhen `false` (the\ + \ default) and `disableResourcesRequestsSplitFromTotal` is not set or\ + \ set to `false` and the sum of the CPU or memory\n of all the containers\ + \ except patroni is equals or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\ + \ or `SGInstanceProfile.spec.requests.memory`\n then the patroni container\ + \ resources will be set to 0.\n" + displayName: Shards Pods Resources Fail When Total Is Higher + path: shards.pods.resources.failWhenTotalIsHigher + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Shards Pods Scheduling Node Selector + path: shards.pods.scheduling.nodeSelector + - description: The pod this Toleration is attached to tolerates any taint + that matches the triple using the matching operator + . + displayName: Shards Pods Scheduling Tolerations + path: shards.pods.scheduling.tolerations + - description: 'Node affinity is a group of node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + displayName: Shards Pods Scheduling Node Affinity + path: shards.pods.scheduling.nodeAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:nodeAffinity + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Shards Pods Scheduling Priority Class Name + path: shards.pods.scheduling.priorityClassName + - description: 'Pod affinity is a group of inter pod affinity scheduling + rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + displayName: Shards Pods Scheduling Pod Affinity + path: shards.pods.scheduling.podAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAffinity + - description: 'Pod anti affinity is a group of inter pod anti affinity + scheduling rules. 
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + displayName: Shards Pods Scheduling Pod Anti Affinity + path: shards.pods.scheduling.podAntiAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity + - description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + displayName: Shards Pods Scheduling Topology Spread Constraints + path: shards.pods.scheduling.topologySpreadConstraints + - description: The label key that the selector applies to. + displayName: Shards Pods Scheduling Backup Node Selector Preferred During + Scheduling Ignored During Execution Preference Match Expressions Key + path: shards.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Pods Scheduling Backup Node Selector Preferred During + Scheduling Ignored During Execution Preference Match Expressions Operator + path: shards.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.operator + - displayName: Shards Pods Scheduling Backup Node Selector Preferred During + Scheduling Ignored During Execution Preference Match Expressions Values + path: shards.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Shards Pods Scheduling Backup Node Selector Preferred During + Scheduling Ignored During Execution Preference Match Fields Key + path: shards.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Pods Scheduling Backup Node Selector Preferred During + Scheduling Ignored During Execution Preference Match Fields Operator + path: shards.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.operator + - displayName: Shards Pods Scheduling Backup Node Selector Preferred During + Scheduling Ignored During Execution Preference Match Fields Values + path: shards.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.values + - description: Weight associated with matching the corresponding nodeSelectorTerm, + in the range 1-100. + displayName: Shards Pods Scheduling Backup Node Selector Preferred During + Scheduling Ignored During Execution Weight + path: shards.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: The label key that the selector applies to. + displayName: Shards Pods Scheduling Backup Node Selector Required During + Scheduling Ignored During Execution Node Selector Terms Match Expressions + Key + path: shards.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
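+ # Editor's note (illustrative sketch only): the backup nodeSelector paths above
+ # follow the standard Kubernetes node-affinity shape, so a hypothetical preference
+ # for nodes labeled "disktype=ssd" could be expressed as:
+ #
+ #   shards:
+ #     pods:
+ #       scheduling:
+ #         backup:
+ #           nodeSelector:
+ #             preferredDuringSchedulingIgnoredDuringExecution:
+ #               - weight: 50
+ #                 preference:
+ #                   matchExpressions:
+ #                     - key: disktype    # hypothetical label key
+ #                       operator: In     # In, NotIn, Exists, DoesNotExist, Gt or Lt
+ #                       values:
+ #                         - ssd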
+ displayName: Shards Pods Scheduling Backup Node Selector Required During + Scheduling Ignored During Execution Node Selector Terms Match Expressions + Operator + path: shards.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.operator + - displayName: Shards Pods Scheduling Backup Node Selector Required During + Scheduling Ignored During Execution Node Selector Terms Match Expressions + Values + path: shards.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Shards Pods Scheduling Backup Node Selector Required During + Scheduling Ignored During Execution Node Selector Terms Match Fields + Key + path: shards.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Pods Scheduling Backup Node Selector Required During + Scheduling Ignored During Execution Node Selector Terms Match Fields + Operator + path: shards.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.operator + - displayName: Shards Pods Scheduling Backup Node Selector Required During + Scheduling Ignored During Execution Node Selector Terms Match Fields + Values + path: shards.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.values + - description: 'Node affinity is a group of node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + displayName: Shards Pods Scheduling Backup Tolerations + path: shards.pods.scheduling.backup.tolerations + - description: 'Node affinity is a group of node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + displayName: Shards Pods Scheduling Backup Node Affinity + path: shards.pods.scheduling.backup.nodeAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:nodeAffinity + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Shards Pods Scheduling Backup Priority Class Name + path: shards.pods.scheduling.backup.priorityClassName + - description: 'Pod affinity is a group of inter pod affinity scheduling + rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + displayName: Shards Pods Scheduling Backup Pod Affinity + path: shards.pods.scheduling.backup.podAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAffinity + - description: 'Pod anti affinity is a group of inter pod anti affinity + scheduling rules. 
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + displayName: Shards Pods Scheduling Backup Pod Anti Affinity + path: shards.pods.scheduling.backup.podAntiAffinity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podAntiAffinity + - description: "managementPolicy controls how pods are created during initial\ + \ scale up, when replacing pods\n on nodes, or when scaling down. The\ + \ default policy is `OrderedReady`, where pods are created\n in increasing\ + \ order (pod-0, then pod-1, etc) and the controller will wait until\ + \ each pod is\n ready before continuing. When scaling down, the pods\ + \ are removed in the opposite order.\n The alternative policy is `Parallel`\ + \ which will create pods in parallel to match the desired\n scale without\ + \ waiting, and on scale down will delete all pods at once.\n" + displayName: Shards Pods Management Policy + path: shards.pods.managementPolicy + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Shards Pods Custom Volume Mounts Mount Path + path: shards.pods.customVolumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Shards Pods Custom Volume Mounts Mount Propagation + path: shards.pods.customVolumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Shards Pods Custom Volume Mounts Name + path: shards.pods.customVolumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. + displayName: Shards Pods Custom Volume Mounts Read Only + path: shards.pods.customVolumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + displayName: Shards Pods Custom Volume Mounts Sub Path + path: shards.pods.customVolumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + displayName: Shards Pods Custom Volume Mounts Sub Path Expr + path: shards.pods.customVolumeMounts.subPathExpr + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Shards Pods Custom Init Volume Mounts Mount Path + path: shards.pods.customInitVolumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Shards Pods Custom Init Volume Mounts Mount Propagation + path: shards.pods.customInitVolumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Shards Pods Custom Init Volume Mounts Name + path: shards.pods.customInitVolumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. 
+ displayName: Shards Pods Custom Init Volume Mounts Read Only + path: shards.pods.customInitVolumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + displayName: Shards Pods Custom Init Volume Mounts Sub Path + path: shards.pods.customInitVolumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + displayName: Shards Pods Custom Init Volume Mounts Sub Path Expr + path: shards.pods.customInitVolumeMounts.subPathExpr + - description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the cluster. It must exist. When not set, a default Postgres + config, for the major version selected, is used. + + + **Changing this field may require a restart.** + + ' + displayName: Shards Configurations SGPostgresConfig + path: shards.configurations.sgPostgresConfig + - description: 'Name of the [SGPoolingConfig](https://stackgres.io/doc/latest/reference/crd/sgpoolconfig) + used for this cluster. Each pod contains a sidecar with a connection + pooler (currently: [PgBouncer](https://www.pgbouncer.org/)). The connection + pooler is implemented as a sidecar. + + + If not set, a default configuration will be used. Disabling connection + pooling altogether is possible if the disableConnectionPooling property + of the pods object is set to true. + + + **Changing this field may require a restart.** + + ' + displayName: Shards Configurations SGPoolingConfig + path: shards.configurations.sgPoolingConfig + - description: "The replication mode applied to the whole cluster.\nPossible\ + \ values are:\n* `async` (default)\n* `sync`\n* `strict-sync`\n* `sync-all`\n\ + * `strict-sync-all`\n\n**async**\n\nWhen in asynchronous mode the cluster\ + \ is allowed to lose some committed transactions.\n When the primary\ + \ server fails or becomes unavailable for any other reason a sufficiently\ + \ healthy standby\n will automatically be promoted to primary. Any\ + \ transactions that have not been replicated to that standby\n remain\ + \ in a \"forked timeline\" on the primary, and are effectively unrecoverable\ + \ (the data is still there,\n but recovering it requires a manual recovery\ + \ effort by data recovery specialists).\n\n**sync**\n\nWhen in synchronous\ + \ mode a standby will not be promoted unless it is certain that the\ + \ standby contains all\n transactions that may have returned a successful\ + \ commit status to client (clients can change the behavior\n per transaction\ + \ using PostgreSQL’s `synchronous_commit` setting. Transactions with\ + \ `synchronous_commit`\n values of `off` and `local` may be lost on\ + \ fail over, but will not be blocked by replication delays). This\n\ + \ means that the system may be unavailable for writes even though some\ + \ servers are available. System\n administrators can still use manual\ + \ failover commands to promote a standby even if it results in transaction\n\ + \ loss.\n\nSynchronous mode does not guarantee multi node durability\ + \ of commits under all circumstances. 
When no suitable\n standby is\ + \ available, primary server will still accept writes, but does not guarantee\ + \ their replication. When\n the primary fails in this mode no standby\ + \ will be promoted. When the host that used to be the primary comes\n\ + \ back it will get promoted automatically, unless system administrator\ + \ performed a manual failover. This behavior\n makes synchronous mode\ + \ usable with 2 node clusters.\n\nWhen synchronous mode is used and\ + \ a standby crashes, commits will block until the primary is switched\ + \ to standalone\n mode. Manually shutting down or restarting a standby\ + \ will not cause a commit service interruption. Standby will\n signal\ + \ the primary to release itself from synchronous standby duties before\ + \ PostgreSQL shutdown is initiated.\n\n**strict-sync**\n\nWhen it is\ + \ absolutely necessary to guarantee that each write is stored durably\ + \ on at least two nodes, use the strict\n synchronous mode. This mode\ + \ prevents synchronous replication from being switched off on the primary\ + \ when no synchronous\n standby candidates are available. As a downside,\ + \ the primary will not be available for writes (unless the Postgres\n\ + \ transaction explicitly turns off `synchronous_mode` parameter), blocking\ + \ all client write requests until at least one\n synchronous replica\ + \ comes up.\n\n**Note**: Because of the way synchronous replication\ + \ is implemented in PostgreSQL it is still possible to lose\n transactions\ + \ even when using strict synchronous mode. If the PostgreSQL backend\ + \ is cancelled while waiting to acknowledge\n replication (as a result\ + \ of packet cancellation due to client timeout or backend failure) transaction\ + \ changes become\n visible for other backends. Such changes are not\ + \ yet replicated and may be lost in case of standby promotion.\n\n**sync-all**\n\ + \nThe same as `sync` but `syncInstances` is ignored and the number of\ + \ synchronous instances is equal to the total number\n of instances\ + \ less one.\n\n**strict-sync-all**\n\nThe same as `strict-sync` but\ + \ `syncInstances` is ignored and the number of synchronous instances\ + \ is equal to the total number\n of instances less one.\n" + displayName: Shards Replication Mode + path: shards.replication.mode + - description: "Number of synchronous standby instances. Must be less than\ + \ the total number of instances. It is set to 1 by default.\n Only\ + \ settable if mode is `sync` or `strict-sync`.\n" + displayName: Shards Replication Sync Instances + path: shards.replication.syncInstances + - description: "Allows specifying how the replicas are initialized.\n\nPossible\ + \ values are:\n\n* `FromPrimary`: When this mode is used replicas will\ + \ be always created from the primary using `pg_basebackup`.\n* `FromReplica`:\ + \ When this mode is used replicas will be created from another existing\ + \ replica using\n `pg_basebackup`. Falls back to `FromPrimary` if there's\ + \ no replica or it fails.\n* `FromExistingBackup`: When this mode is\ + \ used replicas will be created from an existing SGBackup. If `backupNewerThan`\ + \ is set\n the SGBackup must be newer than its value.
When this mode\ + \ fails to restore an SGBackup it will try with a previous one (if any).\n\ + \ Falls back to `FromReplica` if there's no backup left or it fails.\n\ + * `FromNewlyCreatedBackup`: When this mode is used replicas will be\ + \ created from a newly created SGBackup.\n Falls back to `FromExistingBackup`\ + \ if `backupNewerThan` is set and a recent backup newer than\ + \ its value exists, or if it fails.\n" + displayName: Shards Replication Initialization Mode + path: shards.replication.initialization.mode + - description: "An ISO 8601 duration in the format `PnDTnHnMn.nS`, that\ + \ specifies how old an SGBackup has to be in order to be selected\n\ + \ to initialize a replica.\n\nWhen `FromExistingBackup` mode is set\ + \ this field restricts the selection of the SGBackup to be used for recovery\ + \ to those newer than the\n specified value. \n\nWhen `FromNewlyCreatedBackup`\ + \ mode is set this field skips the creation of the SGBackup to be used for recovery\ + \ if one newer than\n the specified value exists. \n" + displayName: Shards Replication Initialization Backup Newer Than + path: shards.replication.initialization.backupNewerThan + - description: 'Maximum storage upload bandwidth used when storing a backup. In bytes (per second). + + ' + displayName: Shards Replication Initialization Backup Restore Performance + Max Network Bandwidth + path: shards.replication.initialization.backupRestorePerformance.maxNetworkBandwidth + - description: 'Maximum disk read I/O when performing a backup. In bytes + (per second). + + ' + displayName: Shards Replication Initialization Backup Restore Performance + Max Disk Bandwidth + path: shards.replication.initialization.backupRestorePerformance.maxDiskBandwidth + - description: 'Backup storage may use several concurrent streams to read + the data. This parameter configures the number of parallel streams to + use. By default, it''s set to the minimum between the number of files + to read and 10. + + ' + displayName: Shards Replication Initialization Backup Restore Performance + Download Concurrency + path: shards.replication.initialization.backupRestorePerformance.downloadConcurrency + - displayName: Shards Metadata Annotations All Resources + path: shards.metadata.annotations.allResources + - displayName: Shards Metadata Annotations Cluster Pods + path: shards.metadata.annotations.clusterPods + - displayName: Shards Metadata Annotations Services + path: shards.metadata.annotations.services + - displayName: Shards Metadata Annotations Primary Service + path: shards.metadata.annotations.primaryService + - displayName: Shards Metadata Annotations Replicas Service + path: shards.metadata.annotations.replicasService + - displayName: Shards Metadata Labels Cluster Pods + path: shards.metadata.labels.clusterPods + - displayName: Shards Metadata Labels Services + path: shards.metadata.labels.services + - description: 'Identifier of the shard StackGres cluster to override (starting + from 0) + + ' + displayName: Shards Overrides Index + path: shards.overrides.index + - description: "Number of StackGres instances per shard's StackGres cluster.\ + \ Each instance contains one Postgres server.\n Out of all of the Postgres\ + \ servers, one is elected as the primary, the rest remain as read-only\ + \ replicas.\n" + displayName: Shards Overrides Instances Per Cluster + path: shards.overrides.instancesPerCluster + - description: 'Allows enabling or disabling any of horizontal and vertical + Pod autoscaling.
+ + + Possible values are: + + * `vertical`: only vertical Pod autoscaling will be enabled (default) + + * `none`: all autoscaling will be disabled + + ' + displayName: Shards Overrides Autoscaling Mode + path: shards.overrides.autoscaling.mode + - description: The minimum allowed CPU for the patroni container + displayName: Shards Overrides Autoscaling Min Allowed Patroni Cpu + path: shards.overrides.autoscaling.minAllowed.patroni.cpu + - description: The minimum allowed memory for the patroni container + displayName: Shards Overrides Autoscaling Min Allowed Patroni Memory + path: shards.overrides.autoscaling.minAllowed.patroni.memory + - description: The minimum allowed CPU for the pgbouncer container + displayName: Shards Overrides Autoscaling Min Allowed Pgbouncer Cpu + path: shards.overrides.autoscaling.minAllowed.pgbouncer.cpu + - description: The minimum allowed memory for the pgbouncer container + displayName: Shards Overrides Autoscaling Min Allowed Pgbouncer Memory + path: shards.overrides.autoscaling.minAllowed.pgbouncer.memory + - description: The minimum allowed CPU for the envoy container + displayName: Shards Overrides Autoscaling Min Allowed Envoy Cpu + path: shards.overrides.autoscaling.minAllowed.envoy.cpu + - description: The minimum allowed memory for the envoy container + displayName: Shards Overrides Autoscaling Min Allowed Envoy Memory + path: shards.overrides.autoscaling.minAllowed.envoy.memory + - description: The maximum allowed CPU for the patroni container + displayName: Shards Overrides Autoscaling Max Allowed Patroni Cpu + path: shards.overrides.autoscaling.maxAllowed.patroni.cpu + - description: The maximum allowed memory for the patroni container + displayName: Shards Overrides Autoscaling Max Allowed Patroni Memory + path: shards.overrides.autoscaling.maxAllowed.patroni.memory + - description: The maximum allowed CPU for the pgbouncer container + displayName: Shards Overrides Autoscaling Max Allowed Pgbouncer Cpu + path: shards.overrides.autoscaling.maxAllowed.pgbouncer.cpu + - description: The maximum allowed memory for the pgbouncer container + displayName: Shards Overrides Autoscaling Max Allowed Pgbouncer Memory + path: shards.overrides.autoscaling.maxAllowed.pgbouncer.memory + - description: The maximum allowed CPU for the envoy container + displayName: Shards Overrides Autoscaling Max Allowed Envoy Cpu + path: shards.overrides.autoscaling.maxAllowed.envoy.cpu + - description: The maximum allowed memory for the envoy container + displayName: Shards Overrides Autoscaling Max Allowed Envoy Memory + path: shards.overrides.autoscaling.maxAllowed.envoy.memory + - description: 'The target value for replicas connections used in order + to trigger the upscale of replica instances. + + ' + displayName: Shards Overrides Autoscaling Horizontal Eplicas Connections + Usage Target + path: shards.overrides.autoscaling.horizontal.eplicasConnectionsUsageTarget + - description: 'The metric type for connections used metric. See https://keda.sh/docs/latest/concepts/scaling-deployments/#triggers + + ' + displayName: Shards Overrides Autoscaling Horizontal Replicas Connections + Usage Metric Type + path: shards.overrides.autoscaling.horizontal.replicasConnectionsUsageMetricType + - description: 'The period in seconds before the downscale of replica instances + can be triggered. 
+ + ' + displayName: Shards Overrides Autoscaling Horizontal Cooldown Period + path: shards.overrides.autoscaling.horizontal.cooldownPeriod + - description: 'The interval in seconds to check if the scaleup or scaledown + has to be triggered. + + ' + displayName: Shards Overrides Autoscaling Horizontal Polling Interval + path: shards.overrides.autoscaling.horizontal.pollingInterval + - description: 'Recommender responsible for generating recommendations for + vertical Pod autoscaling. If not specified, the default one will be used. + + ' + displayName: Shards Overrides Autoscaling Vertical Recommender + path: shards.overrides.autoscaling.vertical.recommender + - description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/04-postgres-cluster-management/03-resource-profiles/). + An SGInstanceProfile defines CPU and memory limits. Must exist before + creating a cluster. When no profile is set, a default (currently: 1 + core, 2 GiB RAM) one is used. + + ' + displayName: Shards Overrides SGInstanceProfile + path: shards.overrides.sgInstanceProfile + - description: If true, when any entry of any `SGScript` fails it will not prevent + subsequent `SGScript` entries from being executed. By default it is `false`. + displayName: Shards Overrides Managed Sql Continue On SG Script Error + path: shards.overrides.managedSql.continueOnSGScriptError + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The id is immutable and must be unique across all the `SGScript` + entries. It is replaced by the operator and is used to identify the + `SGScript` entry. + displayName: Shards Overrides Managed Sql Scripts Id + path: shards.overrides.managedSql.scripts.id + - description: A reference to an `SGScript` + displayName: Shards Overrides Managed Sql Scripts SGScript + path: shards.overrides.managedSql.scripts.sgScript + - description: 'Size of the PersistentVolume set for each instance of the + cluster. This size is specified either in Mebibytes, Gibibytes or Tebibytes + (multiples of 2^20, 2^30 or 2^40, respectively). + + ' + displayName: Shards Overrides Pods Persistent Volume Size + path: shards.overrides.pods.persistentVolume.size + - description: 'Name of an existing StorageClass in the Kubernetes cluster, + used to create the PersistentVolumes for the instances of the cluster. + + ' + displayName: Shards Overrides Pods Persistent Volume Storage Class + path: shards.overrides.pods.persistentVolume.storageClass + - description: 'If set to `true`, avoids creating a connection pooling (using + [PgBouncer](https://www.pgbouncer.org/)) sidecar. + + + **Changing this field may require a restart.** + + ' + displayName: Shards Overrides Pods Disable Connection Pooling + path: shards.overrides.pods.disableConnectionPooling + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: '**Deprecated** use .spec.configurations.observability.disableMetrics instead. + + ' + displayName: Shards Overrides Pods Disable Metrics Exporter + path: shards.overrides.pods.disableMetricsExporter + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If set to `true`, avoids creating the `postgres-util` sidecar. This + sidecar contains usual Postgres administration utilities *that + are not present in the main (`patroni`) container*, like `psql`. Only + disable if you know what you are doing.
+ + **Changing this field may require a restart.** + + ' + displayName: Shards Overrides Pods Disable Postgres Util + path: shards.overrides.pods.disablePostgresUtil + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'If set to `true`, avoids creating the `envoy` sidecar. This + sidecar is used as the edge proxy for the cluster''s Pods, providing + extra metrics to the monitoring layer. + + + **Changing this field may require a restart.** + + ' + displayName: Shards Overrides Pods Disable Envoy + path: shards.overrides.pods.disableEnvoy + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When enabled, resource limits for containers other than the + patroni container will be set just like for the patroni container, as specified + in the SGInstanceProfile. + + + **Changing this field may require a restart.** + + ' + displayName: Shards Overrides Pods Resources Enable Cluster Limits Requirements + path: shards.overrides.pods.resources.enableClusterLimitsRequirements + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "When set to `true` the resources requests values in fields\ + \ `SGInstanceProfile.spec.requests.cpu` and `SGInstanceProfile.spec.requests.memory`\ + \ will represent the resources\n requests of the patroni container and\ + \ the total resources requests calculated by adding the resources requests\ + \ of all the containers (including the patroni container).\n\n**Changing\ + \ this field may require a restart.**\n" + displayName: Shards Overrides Pods Resources Disable Resources Requests + Split From Total + path: shards.overrides.pods.resources.disableResourcesRequestsSplitFromTotal + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "When set to `true` the reconciliation of the cluster will\ + \ fail if `disableResourcesRequestsSplitFromTotal` is not set or set\ + \ to `false` and the sum of the CPU or memory\n of all the containers\ + \ except patroni is equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\ + \ or `SGInstanceProfile.spec.requests.memory`.\n\nWhen `false` (the\ + \ default) and `disableResourcesRequestsSplitFromTotal` is not set or\ + \ set to `false` and the sum of the CPU or memory\n of all the containers\ + \ except patroni is equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\ + \ or `SGInstanceProfile.spec.requests.memory`\n then the patroni container\ + \ resources will be set to 0.\n" + displayName: Shards Overrides Pods Resources Fail When Total Is Higher + path: shards.overrides.pods.resources.failWhenTotalIsHigher + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Shards Overrides Pods Scheduling Node Selector + path: shards.overrides.pods.scheduling.nodeSelector + - description: Effect indicates the taint effect to match. Empty means match + all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + displayName: Shards Overrides Pods Scheduling Tolerations Effect + path: shards.overrides.pods.scheduling.tolerations.effect + - description: Key is the taint key that the toleration applies to. Empty + means match all taint keys. If the key is empty, operator must be Exists; + this combination means to match all values and all keys.
+ displayName: Shards Overrides Pods Scheduling Tolerations Key + path: shards.overrides.pods.scheduling.tolerations.key + - description: Operator represents a key's relationship to the value. Valid + operators are Exists and Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate all taints of a particular + category. + displayName: Shards Overrides Pods Scheduling Tolerations Operator + path: shards.overrides.pods.scheduling.tolerations.operator + - description: TolerationSeconds represents the period of time the toleration + (which must be of effect NoExecute, otherwise this field is ignored) + tolerates the taint. By default, it is not set, which means tolerate + the taint forever (do not evict). Zero and negative values will be treated + as 0 (evict immediately) by the system. + displayName: Shards Overrides Pods Scheduling Tolerations Toleration Seconds + path: shards.overrides.pods.scheduling.tolerations.tolerationSeconds + - description: Value is the taint value the toleration matches to. If the + operator is Exists, the value should be empty, otherwise just a regular + string. + displayName: Shards Overrides Pods Scheduling Tolerations Value + path: shards.overrides.pods.scheduling.tolerations.value + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Key + path: shards.overrides.pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Operator + path: shards.overrides.pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Values + path: shards.overrides.pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Fields Key + path: shards.overrides.pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Fields Operator + path: shards.overrides.pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.operator + - displayName: Shards Overrides Pods Scheduling Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Fields Values + path: shards.overrides.pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.values + - description: Weight associated with matching the corresponding nodeSelectorTerm, + in the range 1-100. 
+ displayName: Shards Overrides Pods Scheduling Node Affinity Preferred + During Scheduling Ignored During Execution Weight + path: shards.overrides.pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Node Affinity Required During + Scheduling Ignored During Execution Node Selector Terms Match Expressions + Key + path: shards.overrides.pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Node Affinity Required During + Scheduling Ignored During Execution Node Selector Terms Match Expressions + Operator + path: shards.overrides.pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Node Affinity Required During + Scheduling Ignored During Execution Node Selector Terms Match Expressions + Values + path: shards.overrides.pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Node Affinity Required During + Scheduling Ignored During Execution Node Selector Terms Match Fields + Key + path: shards.overrides.pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Node Affinity Required During + Scheduling Ignored During Execution Node Selector Terms Match Fields + Operator + path: shards.overrides.pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.operator + - displayName: Shards Overrides Pods Scheduling Node Affinity Required During + Scheduling Ignored During Execution Node Selector Terms Match Fields + Values + path: shards.overrides.pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.values + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Shards Overrides Pods Scheduling Priority Class Name + path: shards.overrides.pods.scheduling.priorityClassName + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Expressions Key + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Expressions Operator + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Expressions Values + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Labels + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Match Label Keys + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Mismatch Label + Keys + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Namespace Selector + Match Expressions Key + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Namespace Selector + Match Expressions Operator + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Namespace Selector + Match Expressions Values + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Namespace Selector + Match Labels + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Namespaces + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Pod Affinity Term Topology Key + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Shards Overrides Pods Scheduling Pod Affinity Preferred During + Scheduling Ignored During Execution Weight + path: shards.overrides.pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Label Selector Match Expressions + Key + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Label Selector Match Expressions + Operator + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Label Selector Match Expressions + Values + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Label Selector Match Labels + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Match Label Keys + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Mismatch Label Keys + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Namespace Selector Match Expressions + Key + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Namespace Selector Match Expressions + Operator + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Namespace Selector Match Expressions + Values + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Namespace Selector Match Labels + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Namespaces + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. 
+ displayName: Shards Overrides Pods Scheduling Pod Affinity Required During + Scheduling Ignored During Execution Topology Key + path: shards.overrides.pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Expressions Key + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Expressions Operator + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Expressions Values + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Labels + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Match Label + Keys + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Mismatch + Label Keys + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespace + Selector Match Expressions Key + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespace + Selector Match Expressions Operator + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespace + Selector Match Expressions Values + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespace + Selector Match Labels + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespaces + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Topology + Key + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Preferred + During Scheduling Ignored During Execution Weight + path: shards.overrides.pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Label Selector Match Expressions + Key + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Label Selector Match Expressions + Operator + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Label Selector Match Expressions + Values + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Label Selector Match Labels + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Match Label Keys + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Mismatch Label Keys + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Namespace Selector Match + Expressions Key + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Namespace Selector Match + Expressions Operator + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Namespace Selector Match + Expressions Values + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Namespace Selector Match + Labels + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Namespaces + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. 
+ displayName: Shards Overrides Pods Scheduling Pod Anti Affinity Required + During Scheduling Ignored During Execution Topology Key + path: shards.overrides.pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Label Selector Match Expressions Key + path: shards.overrides.pods.scheduling.topologySpreadConstraints.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Label Selector Match Expressions Operator + path: shards.overrides.pods.scheduling.topologySpreadConstraints.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Label Selector Match Expressions Values + path: shards.overrides.pods.scheduling.topologySpreadConstraints.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Label Selector Match Labels + path: shards.overrides.pods.scheduling.topologySpreadConstraints.labelSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Match Label Keys + path: shards.overrides.pods.scheduling.topologySpreadConstraints.matchLabelKeys + - description: 'MaxSkew describes the degree to which pods may be unevenly + distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum + permitted difference between the number of matching pods in the target + topology and the global minimum. The global minimum is the minimum number + of matching pods in an eligible domain or zero if the number of eligible + domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector spread as 2/2/1: In + this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P + P | P | - if MaxSkew is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the + ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is + 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy it. + It''s a required field. Default value is 1 and 0 is not allowed.' + displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Max Skew + path: shards.overrides.pods.scheduling.topologySpreadConstraints.maxSkew + - description: 'MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less + than minDomains, Pod Topology Spread treats "global minimum" as 0, and + then the calculation of Skew is performed. And when the number of eligible + domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. As a result, when the number + of eligible domains is less than minDomains, scheduler won''t schedule + more than maxSkew Pods to those domains. If value is nil, the constraint + behaves as if MinDomains is equal to 1. Valid values are integers greater + than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. 
+ + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is + set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 + | zone2 | zone3 | | P P | P P | P P | The number of domains is + less than 5(MinDomains), so "global minimum" is treated as 0. In this + situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any + of the three zones, it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread + feature gate to be enabled (enabled by default).' + displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Min Domains + path: shards.overrides.pods.scheduling.topologySpreadConstraints.minDomains + - description: 'NodeAffinityPolicy indicates how we will treat Pod''s nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: - Honor: only + nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included + in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Node Affinity Policy + path: shards.overrides.pods.scheduling.topologySpreadConstraints.nodeAffinityPolicy + - description: 'NodeTaintsPolicy indicates how we will treat node taints + when calculating pod topology spread skew. Options are: - Honor: nodes + without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. - Ignore: node taints are ignored. All + nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Node Taints Policy + path: shards.overrides.pods.scheduling.topologySpreadConstraints.nodeTaintsPolicy + - description: TopologyKey is the key of node labels. Nodes that have a + label with this key and identical values are considered to be in the + same topology. We consider each as a "bucket", and try + to put balanced number of pods into each bucket. We define a domain + as a particular instance of a topology. Also, we define an eligible + domain as a domain whose nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", + each zone is a domain of that topology. It's a required field. + displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + Topology Key + path: shards.overrides.pods.scheduling.topologySpreadConstraints.topologyKey + - description: "WhenUnsatisfiable indicates how to deal with a pod if it\ + \ doesn't satisfy the spread constraint. - DoNotSchedule (default) tells\ + \ the scheduler not to schedule it. - ScheduleAnyway tells the scheduler\ + \ to schedule the pod in any location,\n but giving higher precedence\ + \ to topologies that would help reduce the\n skew.\nA constraint is\ + \ considered \"Unsatisfiable\" for an incoming pod if and only if every\ + \ possible node assignment for that pod would violate \"MaxSkew\" on\ + \ some topology. 
For example, in a 3-zone cluster, MaxSkew is set to\ + \ 1, and pods with the same labelSelector spread as 3/1/1: | zone1 |\ + \ zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is\ + \ set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3)\ + \ to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies\ + \ MaxSkew(1). In other words, the cluster can still be imbalanced, but\ + \ scheduler won't make it *more* imbalanced. It's a required field." + displayName: Shards Overrides Pods Scheduling Topology Spread Constraints + When Unsatisfiable + path: shards.overrides.pods.scheduling.topologySpreadConstraints.whenUnsatisfiable + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Key + path: shards.overrides.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Operator + path: shards.overrides.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Values + path: shards.overrides.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Fields Key + path: shards.overrides.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Fields Operator + path: shards.overrides.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.operator + - displayName: Shards Overrides Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Preference Match Fields Values + path: shards.overrides.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.values + - description: Weight associated with matching the corresponding nodeSelectorTerm, + in the range 1-100. + displayName: Shards Overrides Pods Scheduling Backup Node Selector Preferred + During Scheduling Ignored During Execution Weight + path: shards.overrides.pods.scheduling.backup.nodeSelector.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: The label key that the selector applies to. 
+ displayName: Shards Overrides Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Key + path: shards.overrides.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Operator + path: shards.overrides.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Values + path: shards.overrides.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Key + path: shards.overrides.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Operator + path: shards.overrides.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.operator + - displayName: Shards Overrides Pods Scheduling Backup Node Selector Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Values + path: shards.overrides.pods.scheduling.backup.nodeSelector.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.values + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Tolerations Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Key + path: shards.overrides.pods.scheduling.backup.tolerations.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Tolerations Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Operator + path: shards.overrides.pods.scheduling.backup.tolerations.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Tolerations Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Values + path: shards.overrides.pods.scheduling.backup.tolerations.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.values + - description: The label key that the selector applies to. 
+ displayName: Shards Overrides Pods Scheduling Backup Tolerations Preferred + During Scheduling Ignored During Execution Preference Match Fields Key + path: shards.overrides.pods.scheduling.backup.tolerations.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Tolerations Preferred + During Scheduling Ignored During Execution Preference Match Fields Operator + path: shards.overrides.pods.scheduling.backup.tolerations.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.operator + - displayName: Shards Overrides Pods Scheduling Backup Tolerations Preferred + During Scheduling Ignored During Execution Preference Match Fields Values + path: shards.overrides.pods.scheduling.backup.tolerations.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.values + - description: Weight associated with matching the corresponding nodeSelectorTerm, + in the range 1-100. + displayName: Shards Overrides Pods Scheduling Backup Tolerations Preferred + During Scheduling Ignored During Execution Weight + path: shards.overrides.pods.scheduling.backup.tolerations.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Tolerations Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Key + path: shards.overrides.pods.scheduling.backup.tolerations.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Tolerations Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Operator + path: shards.overrides.pods.scheduling.backup.tolerations.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Tolerations Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Values + path: shards.overrides.pods.scheduling.backup.tolerations.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Tolerations Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Key + path: shards.overrides.pods.scheduling.backup.tolerations.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ displayName: Shards Overrides Pods Scheduling Backup Tolerations Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Operator + path: shards.overrides.pods.scheduling.backup.tolerations.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.operator + - displayName: Shards Overrides Pods Scheduling Backup Tolerations Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Values + path: shards.overrides.pods.scheduling.backup.tolerations.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.values + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Key + path: shards.overrides.pods.scheduling.backup.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Operator + path: shards.overrides.pods.scheduling.backup.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Expressions + Values + path: shards.overrides.pods.scheduling.backup.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Fields Key + path: shards.overrides.pods.scheduling.backup.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Fields Operator + path: shards.overrides.pods.scheduling.backup.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.operator + - displayName: Shards Overrides Pods Scheduling Backup Node Affinity Preferred + During Scheduling Ignored During Execution Preference Match Fields Values + path: shards.overrides.pods.scheduling.backup.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.values + - description: Weight associated with matching the corresponding nodeSelectorTerm, + in the range 1-100. + displayName: Shards Overrides Pods Scheduling Backup Node Affinity Preferred + During Scheduling Ignored During Execution Weight + path: shards.overrides.pods.scheduling.backup.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: The label key that the selector applies to. 
+ displayName: Shards Overrides Pods Scheduling Backup Node Affinity Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Key + path: shards.overrides.pods.scheduling.backup.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Node Affinity Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Operator + path: shards.overrides.pods.scheduling.backup.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Node Affinity Required + During Scheduling Ignored During Execution Node Selector Terms Match + Expressions Values + path: shards.overrides.pods.scheduling.backup.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Node Affinity Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Key + path: shards.overrides.pods.scheduling.backup.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Shards Overrides Pods Scheduling Backup Node Affinity Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Operator + path: shards.overrides.pods.scheduling.backup.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.operator + - displayName: Shards Overrides Pods Scheduling Backup Node Affinity Required + During Scheduling Ignored During Execution Node Selector Terms Match + Fields Values + path: shards.overrides.pods.scheduling.backup.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.values + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Shards Overrides Pods Scheduling Backup Priority Class Name + path: shards.overrides.pods.scheduling.backup.priorityClassName + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Expressions Key + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Expressions Operator + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Expressions Values + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Label Selector + Match Labels + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Match Label + Keys + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Mismatch + Label Keys + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespace + Selector Match Expressions Key + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespace + Selector Match Expressions Operator + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespace + Selector Match Expressions Values + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespace + Selector Match Labels + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Namespaces + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Pod Affinity Term Topology + Key + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Preferred + During Scheduling Ignored During Execution Weight + path: shards.overrides.pods.scheduling.backup.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Label Selector Match Expressions + Key + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Label Selector Match Expressions + Operator + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Label Selector Match Expressions + Values + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Label Selector Match Labels + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Match Label Keys + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Mismatch Label Keys + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Namespace Selector Match + Expressions Key + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Namespace Selector Match + Expressions Operator + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Namespace Selector Match + Expressions Values + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Namespace Selector Match + Labels + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Namespaces + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. 
+ displayName: Shards Overrides Pods Scheduling Backup Pod Affinity Required + During Scheduling Ignored During Execution Topology Key + path: shards.overrides.pods.scheduling.backup.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Label Selector Match Expressions Key + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Label Selector Match Expressions Operator + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Label Selector Match Expressions Values + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Label Selector Match Labels + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Match Label Keys + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Mismatch Label Keys + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Namespace Selector Match Expressions Key + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Namespace Selector Match Expressions Operator + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Namespace Selector Match Expressions Values + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Namespace Selector Match Labels + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Namespaces + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Pod Affinity Term + Topology Key + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Preferred During Scheduling Ignored During Execution Weight + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Label Selector Match + Expressions Key + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Label Selector Match + Expressions Operator + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Label Selector Match + Expressions Values + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Label Selector Match + Labels + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Match Label Keys + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Mismatch Label Keys + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Namespace Selector + Match Expressions Key + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Namespace Selector + Match Expressions Operator + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Namespace Selector + Match Expressions Values + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Namespace Selector + Match Labels + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Namespaces + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Shards Overrides Pods Scheduling Backup Pod Anti Affinity + Required During Scheduling Ignored During Execution Topology Key + path: shards.overrides.pods.scheduling.backup.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: "managementPolicy controls how pods are created during initial\ + \ scale up, when replacing pods\n on nodes, or when scaling down. The\ + \ default policy is `OrderedReady`, where pods are created\n in increasing\ + \ order (pod-0, then pod-1, etc) and the controller will wait until\ + \ each pod is\n ready before continuing. When scaling down, the pods\ + \ are removed in the opposite order.\n The alternative policy is `Parallel`\ + \ which will create pods in parallel to match the desired\n scale without\ + \ waiting, and on scale down will delete all pods at once.\n" + displayName: Shards Overrides Pods Management Policy + path: shards.overrides.pods.managementPolicy + - description: 'name of the custom volume. The name will be implicitly prefixed + with `c-` to avoid clashing with internal operator volume names. Must + be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + ' + displayName: Shards Overrides Pods Custom Volumes Name + path: shards.overrides.pods.customVolumes.name + - description: 'defaultMode is optional: mode bits used to set permissions + on created files by default. Must be an octal value between 0000 and + 0777 or a decimal value between 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal values for mode bits. Defaults + to 0644. Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file mode, + like fsGroup, and the result can be other mode bits set.' 
+ displayName: Shards Overrides Pods Custom Volumes Config Map Default Mode + path: shards.overrides.pods.customVolumes.configMap.defaultMode + - description: key is the key to project. + displayName: Shards Overrides Pods Custom Volumes Config Map Key + path: shards.overrides.pods.customVolumes.configMap.key + - description: 'mode is Optional: mode bits used to set permissions on this + file. Must be an octal value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal and decimal values, JSON + requires decimal values for mode bits. If not specified, the volume + defaultMode will be used. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result can be other + mode bits set.' + displayName: Shards Overrides Pods Custom Volumes Config Map Mode + path: shards.overrides.pods.customVolumes.configMap.mode + - description: path is the relative path of the file to map the key to. + May not be an absolute path. May not contain the path element '..'. + May not start with the string '..'. + displayName: Shards Overrides Pods Custom Volumes Config Map Path + path: shards.overrides.pods.customVolumes.configMap.path + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Volumes Config Map Name + path: shards.overrides.pods.customVolumes.configMap.name + - description: optional specify whether the ConfigMap or its keys must be + defined + displayName: Shards Overrides Pods Custom Volumes Config Map Optional + path: shards.overrides.pods.customVolumes.configMap.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Optional: mode bits to use on created files by default. + Must be a Optional: mode bits used to set permissions on created files + by default. Must be an octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + displayName: Shards Overrides Pods Custom Volumes Downward API Default + Mode + path: shards.overrides.pods.customVolumes.downwardAPI.defaultMode + - description: Version of the schema the FieldPath is written in terms of, + defaults to "v1". + displayName: Shards Overrides Pods Custom Volumes Downward API Field Ref + Api Version + path: shards.overrides.pods.customVolumes.downwardAPI.fieldRef.apiVersion + - description: Path of the field to select in the specified API version. + displayName: Shards Overrides Pods Custom Volumes Downward API Field Ref + Field Path + path: shards.overrides.pods.customVolumes.downwardAPI.fieldRef.fieldPath + - description: 'Optional: mode bits used to set permissions on this file, + must be an octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, the volume defaultMode + will be used. This might be in conflict with other options that affect + the file mode, like fsGroup, and the result can be other mode bits set.' 
+ displayName: Shards Overrides Pods Custom Volumes Downward API Mode + path: shards.overrides.pods.customVolumes.downwardAPI.mode + - description: 'Required: Path is the relative path name of the file to + be created. Must not be absolute or contain the ''..'' path. Must be + utf-8 encoded. The first item of the relative path must not start with + ''..''' + displayName: Shards Overrides Pods Custom Volumes Downward API Path + path: shards.overrides.pods.customVolumes.downwardAPI.path + - description: 'Container name: required for volumes, optional for env vars' + displayName: Shards Overrides Pods Custom Volumes Downward API Resource + Field Ref Container Name + path: shards.overrides.pods.customVolumes.downwardAPI.resourceFieldRef.containerName + - description: "Quantity is a fixed-point representation of a number. It\ + \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\ + \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\ + \n``` ::= \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::= | \ + \ ::= | . | . | .\ + \ ::= \"+\" | \"-\" ::= \ + \ | ::= | \ + \ | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in magnitude, nor may it\ + \ have more than 3 decimal places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This\ + \ may be extended in the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will remember the type\ + \ of suffix it had, and will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Shards Overrides Pods Custom Volumes Downward API Resource + Field Ref Divisor + path: shards.overrides.pods.customVolumes.downwardAPI.resourceFieldRef.divisor + - description: 'Required: resource to select' + displayName: Shards Overrides Pods Custom Volumes Downward API Resource + Field Ref Resource + path: shards.overrides.pods.customVolumes.downwardAPI.resourceFieldRef.resource + - description: 'medium represents what type of storage medium should back + this directory. 
The default is "" which means to use the node''s default + medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + displayName: Shards Overrides Pods Custom Volumes Empty Dir Medium + path: shards.overrides.pods.customVolumes.emptyDir.medium + - description: "Quantity is a fixed-point representation of a number. It\ + \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\ + \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\ + \n``` ::= \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::= | \ + \ ::= | . | . | .\ + \ ::= \"+\" | \"-\" ::= \ + \ | ::= | \ + \ | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in magnitude, nor may it\ + \ have more than 3 decimal places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This\ + \ may be extended in the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will remember the type\ + \ of suffix it had, and will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Shards Overrides Pods Custom Volumes Empty Dir Size Limit + path: shards.overrides.pods.customVolumes.emptyDir.sizeLimit + - description: directory is the target directory name. Must not contain + or start with '..'. If '.' is supplied, the volume directory will be + the git repository. Otherwise, if specified, the volume will contain + the git repository in the subdirectory with the given name. + displayName: Shards Overrides Pods Custom Volumes Git Repo Directory + path: shards.overrides.pods.customVolumes.gitRepo.directory + - description: repository is the URL + displayName: Shards Overrides Pods Custom Volumes Git Repo Repository + path: shards.overrides.pods.customVolumes.gitRepo.repository + - description: revision is the commit hash for the specified revision. 
+ displayName: Shards Overrides Pods Custom Volumes Git Repo Revision + path: shards.overrides.pods.customVolumes.gitRepo.revision + - description: 'endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + displayName: Shards Overrides Pods Custom Volumes Glusterfs Endpoints + path: shards.overrides.pods.customVolumes.glusterfs.endpoints + - description: 'path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + displayName: Shards Overrides Pods Custom Volumes Glusterfs Path + path: shards.overrides.pods.customVolumes.glusterfs.path + - description: 'readOnly here will force the Glusterfs volume to be mounted + with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + displayName: Shards Overrides Pods Custom Volumes Glusterfs Read Only + path: shards.overrides.pods.customVolumes.glusterfs.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'path of the directory on the host. If the path is a symlink, + it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + displayName: Shards Overrides Pods Custom Volumes Host Path Path + path: shards.overrides.pods.customVolumes.hostPath.path + - description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + displayName: Shards Overrides Pods Custom Volumes Host Path Type + path: shards.overrides.pods.customVolumes.hostPath.type + - description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + displayName: Shards Overrides Pods Custom Volumes Nfs Path + path: shards.overrides.pods.customVolumes.nfs.path + - description: 'readOnly here will force the NFS export to be mounted with + read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + displayName: Shards Overrides Pods Custom Volumes Nfs Read Only + path: shards.overrides.pods.customVolumes.nfs.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + displayName: Shards Overrides Pods Custom Volumes Nfs Server + path: shards.overrides.pods.customVolumes.nfs.server + - description: defaultMode are the mode bits used to set permissions on + created files by default. Must be an octal value between 0000 and 0777 + or a decimal value between 0 and 511. YAML accepts both octal and decimal + values, JSON requires decimal values for mode bits. Directories within + the path are not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, and the + result can be other mode bits set. + displayName: Shards Overrides Pods Custom Volumes Projected Default Mode + path: shards.overrides.pods.customVolumes.projected.defaultMode + - description: key is the label key that the selector applies to. 
+ displayName: Shards Overrides Pods Custom Volumes Projected Sources Cluster + Trust Bundle Label Selector Match Expressions Key + path: shards.overrides.pods.customVolumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Cluster + Trust Bundle Label Selector Match Expressions Operator + path: shards.overrides.pods.customVolumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.operator + - displayName: Shards Overrides Pods Custom Volumes Projected Sources Cluster + Trust Bundle Label Selector Match Expressions Values + path: shards.overrides.pods.customVolumes.projected.sources.clusterTrustBundle.labelSelector.matchExpressions.values + - displayName: Shards Overrides Pods Custom Volumes Projected Sources Cluster + Trust Bundle Label Selector Match Labels + path: shards.overrides.pods.customVolumes.projected.sources.clusterTrustBundle.labelSelector.matchLabels + - description: Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Cluster + Trust Bundle Name + path: shards.overrides.pods.customVolumes.projected.sources.clusterTrustBundle.name + - description: If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle + is allowed not to exist. If using signerName, then the combination + of signerName and labelSelector is allowed to match zero ClusterTrustBundles. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Cluster + Trust Bundle Optional + path: shards.overrides.pods.customVolumes.projected.sources.clusterTrustBundle.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Relative path from the volume root to write the bundle. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Cluster + Trust Bundle Path + path: shards.overrides.pods.customVolumes.projected.sources.clusterTrustBundle.path + - description: Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected ClusterTrustBundles + will be unified and deduplicated. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Cluster + Trust Bundle Signer Name + path: shards.overrides.pods.customVolumes.projected.sources.clusterTrustBundle.signerName + - description: key is the key to project. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Config + Map Key + path: shards.overrides.pods.customVolumes.projected.sources.configMap.key + - description: 'mode is Optional: mode bits used to set permissions on this + file. Must be an octal value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal and decimal values, JSON + requires decimal values for mode bits. If not specified, the volume + defaultMode will be used. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result can be other + mode bits set.' + displayName: Shards Overrides Pods Custom Volumes Projected Sources Config + Map Mode + path: shards.overrides.pods.customVolumes.projected.sources.configMap.mode + - description: path is the relative path of the file to map the key to. 
+ May not be an absolute path. May not contain the path element '..'. + May not start with the string '..'. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Config + Map Path + path: shards.overrides.pods.customVolumes.projected.sources.configMap.path + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Volumes Projected Sources Config + Map Name + path: shards.overrides.pods.customVolumes.projected.sources.configMap.name + - description: optional specify whether the ConfigMap or its keys must be + defined + displayName: Shards Overrides Pods Custom Volumes Projected Sources Config + Map Optional + path: shards.overrides.pods.customVolumes.projected.sources.configMap.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Version of the schema the FieldPath is written in terms of, + defaults to "v1". + displayName: Shards Overrides Pods Custom Volumes Projected Sources Downward + API Field Ref Api Version + path: shards.overrides.pods.customVolumes.projected.sources.downwardAPI.fieldRef.apiVersion + - description: Path of the field to select in the specified API version. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Downward + API Field Ref Field Path + path: shards.overrides.pods.customVolumes.projected.sources.downwardAPI.fieldRef.fieldPath + - description: 'Optional: mode bits used to set permissions on this file, + must be an octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, the volume defaultMode + will be used. This might be in conflict with other options that affect + the file mode, like fsGroup, and the result can be other mode bits set.' + displayName: Shards Overrides Pods Custom Volumes Projected Sources Downward + API Mode + path: shards.overrides.pods.customVolumes.projected.sources.downwardAPI.mode + - description: 'Required: Path is the relative path name of the file to + be created. Must not be absolute or contain the ''..'' path. Must be + utf-8 encoded. The first item of the relative path must not start with + ''..''' + displayName: Shards Overrides Pods Custom Volumes Projected Sources Downward + API Path + path: shards.overrides.pods.customVolumes.projected.sources.downwardAPI.path + - description: 'Container name: required for volumes, optional for env vars' + displayName: Shards Overrides Pods Custom Volumes Projected Sources Downward + API Resource Field Ref Container Name + path: shards.overrides.pods.customVolumes.projected.sources.downwardAPI.resourceFieldRef.containerName + - description: "Quantity is a fixed-point representation of a number. It\ + \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\ + \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\ + \n``` ::= \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::= | \ + \ ::= | . | . 
| .\ + \ ::= \"+\" | \"-\" ::= \ + \ | ::= | \ + \ | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in magnitude, nor may it\ + \ have more than 3 decimal places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This\ + \ may be extended in the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will remember the type\ + \ of suffix it had, and will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Shards Overrides Pods Custom Volumes Projected Sources Downward + API Resource Field Ref Divisor + path: shards.overrides.pods.customVolumes.projected.sources.downwardAPI.resourceFieldRef.divisor + - description: 'Required: resource to select' + displayName: Shards Overrides Pods Custom Volumes Projected Sources Downward + API Resource Field Ref Resource + path: shards.overrides.pods.customVolumes.projected.sources.downwardAPI.resourceFieldRef.resource + - description: key is the key to project. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Secret + Key + path: shards.overrides.pods.customVolumes.projected.sources.secret.key + - description: 'mode is Optional: mode bits used to set permissions on this + file. Must be an octal value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal and decimal values, JSON + requires decimal values for mode bits. If not specified, the volume + defaultMode will be used. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result can be other + mode bits set.' + displayName: Shards Overrides Pods Custom Volumes Projected Sources Secret + Mode + path: shards.overrides.pods.customVolumes.projected.sources.secret.mode + - description: path is the relative path of the file to map the key to. + May not be an absolute path. May not contain the path element '..'. + May not start with the string '..'. 
+ displayName: Shards Overrides Pods Custom Volumes Projected Sources Secret + Path + path: shards.overrides.pods.customVolumes.projected.sources.secret.path + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Volumes Projected Sources Secret + Name + path: shards.overrides.pods.customVolumes.projected.sources.secret.name + - description: optional field specify whether the Secret or its key must + be defined + displayName: Shards Overrides Pods Custom Volumes Projected Sources Secret + Optional + path: shards.overrides.pods.customVolumes.projected.sources.secret.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: audience is the intended audience of the token. A recipient + of a token must identify itself with an identifier specified in the + audience of the token, and otherwise should reject the token. The audience + defaults to the identifier of the apiserver. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Service + Account Token Audience + path: shards.overrides.pods.customVolumes.projected.sources.serviceAccountToken.audience + - description: expirationSeconds is the requested duration of validity of + the service account token. As the token approaches expiration, the kubelet + volume plugin will proactively rotate the service account token. The + kubelet will start trying to rotate the token if the token is older + than 80 percent of its time to live or if the token is older than 24 + hours.Defaults to 1 hour and must be at least 10 minutes. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Service + Account Token Expiration Seconds + path: shards.overrides.pods.customVolumes.projected.sources.serviceAccountToken.expirationSeconds + - description: path is the path relative to the mount point of the file + to project the token into. + displayName: Shards Overrides Pods Custom Volumes Projected Sources Service + Account Token Path + path: shards.overrides.pods.customVolumes.projected.sources.serviceAccountToken.path + - description: 'defaultMode is Optional: mode bits used to set permissions + on created files by default. Must be an octal value between 0000 and + 0777 or a decimal value between 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal values for mode bits. Defaults + to 0644. Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file mode, + like fsGroup, and the result can be other mode bits set.' + displayName: Shards Overrides Pods Custom Volumes Secret Default Mode + path: shards.overrides.pods.customVolumes.secret.defaultMode + - description: key is the key to project. + displayName: Shards Overrides Pods Custom Volumes Secret Key + path: shards.overrides.pods.customVolumes.secret.key + - description: 'mode is Optional: mode bits used to set permissions on this + file. Must be an octal value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal and decimal values, JSON + requires decimal values for mode bits. If not specified, the volume + defaultMode will be used. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result can be other + mode bits set.' 
+ displayName: Shards Overrides Pods Custom Volumes Secret Mode + path: shards.overrides.pods.customVolumes.secret.mode + - description: path is the relative path of the file to map the key to. + May not be an absolute path. May not contain the path element '..'. + May not start with the string '..'. + displayName: Shards Overrides Pods Custom Volumes Secret Path + path: shards.overrides.pods.customVolumes.secret.path + - description: optional field specify whether the Secret or its keys must + be defined + displayName: Shards Overrides Pods Custom Volumes Secret Optional + path: shards.overrides.pods.customVolumes.secret.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'secretName is the name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + displayName: Shards Overrides Pods Custom Volumes Secret Secret Name + path: shards.overrides.pods.customVolumes.secret.secretName + - description: 'claimName is the name of a PersistentVolumeClaim in the + same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + displayName: Shards Overrides Pods Custom Volumes Persistent Volume Claim + Claim Name + path: shards.overrides.pods.customVolumes.persistentVolumeClaim.claimName + - description: readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + displayName: Shards Overrides Pods Custom Volumes Persistent Volume Claim + Read Only + path: shards.overrides.pods.customVolumes.persistentVolumeClaim.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Shards Overrides Pods Custom Init Containers Args + path: shards.overrides.pods.customInitContainers.args + - displayName: Shards Overrides Pods Custom Init Containers Command + path: shards.overrides.pods.customInitContainers.command + - description: Name of the environment variable. Must be a C_IDENTIFIER. + displayName: Shards Overrides Pods Custom Init Containers Env Name + path: shards.overrides.pods.customInitContainers.env.name + - description: 'Variable references $(VAR_NAME) are expanded using the previously + defined environment variables in the container and any service environment + variables. If a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single $, which + allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or not. Defaults + to "".' + displayName: Shards Overrides Pods Custom Init Containers Env Value + path: shards.overrides.pods.customInitContainers.env.value + - description: The key to select. + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Config Map Key Ref Key + path: shards.overrides.pods.customInitContainers.env.valueFrom.configMapKeyRef.key + - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Config Map Key Ref Name + path: shards.overrides.pods.customInitContainers.env.valueFrom.configMapKeyRef.name + - description: Specify whether the ConfigMap or its key must be defined + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Config Map Key Ref Optional + path: shards.overrides.pods.customInitContainers.env.valueFrom.configMapKeyRef.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Version of the schema the FieldPath is written in terms of, + defaults to "v1". + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Field Ref Api Version + path: shards.overrides.pods.customInitContainers.env.valueFrom.fieldRef.apiVersion + - description: Path of the field to select in the specified API version. + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Field Ref Field Path + path: shards.overrides.pods.customInitContainers.env.valueFrom.fieldRef.fieldPath + - description: 'Container name: required for volumes, optional for env vars' + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Resource Field Ref Container Name + path: shards.overrides.pods.customInitContainers.env.valueFrom.resourceFieldRef.containerName + - description: "Quantity is a fixed-point representation of a number. It\ + \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\ + \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\ + \n``` ::= \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::= | \ + \ ::= | . | . | .\ + \ ::= \"+\" | \"-\" ::= \ + \ | ::= | \ + \ | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in magnitude, nor may it\ + \ have more than 3 decimal places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This\ + \ may be extended in the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will remember the type\ + \ of suffix it had, and will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. 
(So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Resource Field Ref Divisor + path: shards.overrides.pods.customInitContainers.env.valueFrom.resourceFieldRef.divisor + - description: 'Required: resource to select' + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Resource Field Ref Resource + path: shards.overrides.pods.customInitContainers.env.valueFrom.resourceFieldRef.resource + - description: The key of the secret to select from. Must be a valid secret + key. + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Secret Key Ref Key + path: shards.overrides.pods.customInitContainers.env.valueFrom.secretKeyRef.key + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Secret Key Ref Name + path: shards.overrides.pods.customInitContainers.env.valueFrom.secretKeyRef.name + - description: Specify whether the Secret or its key must be defined + displayName: Shards Overrides Pods Custom Init Containers Env Value From + Secret Key Ref Optional + path: shards.overrides.pods.customInitContainers.env.valueFrom.secretKeyRef.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Init Containers Env From Config + Map Ref Name + path: shards.overrides.pods.customInitContainers.envFrom.configMapRef.name + - description: Specify whether the ConfigMap must be defined + displayName: Shards Overrides Pods Custom Init Containers Env From Config + Map Ref Optional + path: shards.overrides.pods.customInitContainers.envFrom.configMapRef.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: An optional identifier to prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + displayName: Shards Overrides Pods Custom Init Containers Env From Prefix + path: shards.overrides.pods.customInitContainers.envFrom.prefix + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Init Containers Env From Secret + Ref Name + path: shards.overrides.pods.customInitContainers.envFrom.secretRef.name + - description: Specify whether the Secret must be defined + displayName: Shards Overrides Pods Custom Init Containers Env From Secret + Ref Optional + path: shards.overrides.pods.customInitContainers.envFrom.secretRef.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default + or override container images in workload controllers like Deployments + and StatefulSets.' + displayName: Shards Overrides Pods Custom Init Containers Image + path: shards.overrides.pods.customInitContainers.image + - description: 'Image pull policy. One of Always, Never, IfNotPresent. 
Defaults + to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + displayName: Shards Overrides Pods Custom Init Containers Image Pull Policy + path: shards.overrides.pods.customInitContainers.imagePullPolicy + - displayName: Shards Overrides Pods Custom Init Containers Lifecycle Post + Start Exec Command + path: shards.overrides.pods.customInitContainers.lifecycle.postStart.exec.command + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Post + Start Http Get Host + path: shards.overrides.pods.customInitContainers.lifecycle.postStart.httpGet.host + - description: The header field name. This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Post + Start Http Get Http Headers Name + path: shards.overrides.pods.customInitContainers.lifecycle.postStart.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Post + Start Http Get Http Headers Value + path: shards.overrides.pods.customInitContainers.lifecycle.postStart.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Post + Start Http Get Path + path: shards.overrides.pods.customInitContainers.lifecycle.postStart.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Post + Start Http Get Scheme + path: shards.overrides.pods.customInitContainers.lifecycle.postStart.httpGet.scheme + - description: Seconds is the number of seconds to sleep. + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Post + Start Sleep Seconds + path: shards.overrides.pods.customInitContainers.lifecycle.postStart.sleep.seconds + - description: 'Optional: Host name to connect to, defaults to the pod IP.' + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Post + Start Tcp Socket Host + path: shards.overrides.pods.customInitContainers.lifecycle.postStart.tcpSocket.host + - displayName: Shards Overrides Pods Custom Init Containers Lifecycle Pre + Stop Exec Command + path: shards.overrides.pods.customInitContainers.lifecycle.preStop.exec.command + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Pre + Stop Http Get Host + path: shards.overrides.pods.customInitContainers.lifecycle.preStop.httpGet.host + - description: The header field name. This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Pre + Stop Http Get Http Headers Name + path: shards.overrides.pods.customInitContainers.lifecycle.preStop.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Pre + Stop Http Get Http Headers Value + path: shards.overrides.pods.customInitContainers.lifecycle.preStop.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. 
+ displayName: Shards Overrides Pods Custom Init Containers Lifecycle Pre + Stop Http Get Path + path: shards.overrides.pods.customInitContainers.lifecycle.preStop.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Pre + Stop Http Get Scheme + path: shards.overrides.pods.customInitContainers.lifecycle.preStop.httpGet.scheme + - description: Seconds is the number of seconds to sleep. + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Pre + Stop Sleep Seconds + path: shards.overrides.pods.customInitContainers.lifecycle.preStop.sleep.seconds + - description: 'Optional: Host name to connect to, defaults to the pod IP.' + displayName: Shards Overrides Pods Custom Init Containers Lifecycle Pre + Stop Tcp Socket Host + path: shards.overrides.pods.customInitContainers.lifecycle.preStop.tcpSocket.host + - displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Exec Command + path: shards.overrides.pods.customInitContainers.livenessProbe.exec.command + - description: Minimum consecutive failures for the probe to be considered + failed after having succeeded. Defaults to 3. Minimum value is 1. + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Failure Threshold + path: shards.overrides.pods.customInitContainers.livenessProbe.failureThreshold + - description: Port number of the gRPC service. Number must be in the range + 1 to 65535. + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Grpc Port + path: shards.overrides.pods.customInitContainers.livenessProbe.grpc.port + - description: 'Service is the name of the service to place in the gRPC + HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC.' + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Grpc Service + path: shards.overrides.pods.customInitContainers.livenessProbe.grpc.service + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Http Get Host + path: shards.overrides.pods.customInitContainers.livenessProbe.httpGet.host + - description: The header field name. This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Http Get Http Headers Name + path: shards.overrides.pods.customInitContainers.livenessProbe.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Http Get Http Headers Value + path: shards.overrides.pods.customInitContainers.livenessProbe.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Http Get Path + path: shards.overrides.pods.customInitContainers.livenessProbe.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Http Get Scheme + path: shards.overrides.pods.customInitContainers.livenessProbe.httpGet.scheme + - description: 'Number of seconds after the container has started before + liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Initial Delay Seconds + path: shards.overrides.pods.customInitContainers.livenessProbe.initialDelaySeconds + - description: How often (in seconds) to perform the probe. Default to 10 + seconds. Minimum value is 1. + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Period Seconds + path: shards.overrides.pods.customInitContainers.livenessProbe.periodSeconds + - description: Minimum consecutive successes for the probe to be considered + successful after having failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Success Threshold + path: shards.overrides.pods.customInitContainers.livenessProbe.successThreshold + - description: 'Optional: Host name to connect to, defaults to the pod IP.' + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Tcp Socket Host + path: shards.overrides.pods.customInitContainers.livenessProbe.tcpSocket.host + - description: Optional duration in seconds the pod needs to terminate gracefully + upon probe failure. The grace period is the duration in seconds after + the processes running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your process. If + this value is nil, the pod's terminationGracePeriodSeconds will be used. + Otherwise, this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). This is a beta field + and requires enabling ProbeTerminationGracePeriod feature gate. Minimum + value is 1. spec.terminationGracePeriodSeconds is used if unset. + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Termination Grace Period Seconds + path: shards.overrides.pods.customInitContainers.livenessProbe.terminationGracePeriodSeconds + - description: 'Number of seconds after which the probe times out. Defaults + to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Init Containers Liveness Probe + Timeout Seconds + path: shards.overrides.pods.customInitContainers.livenessProbe.timeoutSeconds + - description: Name of the container specified as a DNS_LABEL. Each container + in a pod must have a unique name (DNS_LABEL). Cannot be updated. + displayName: Shards Overrides Pods Custom Init Containers Name + path: shards.overrides.pods.customInitContainers.name + - description: Number of port to expose on the pod's IP address. This must + be a valid port number, 0 < x < 65536. + displayName: Shards Overrides Pods Custom Init Containers Ports Container + Port + path: shards.overrides.pods.customInitContainers.ports.containerPort + - description: What host IP to bind the external port to. + displayName: Shards Overrides Pods Custom Init Containers Ports Host IP + path: shards.overrides.pods.customInitContainers.ports.hostIP + - description: Number of port to expose on the host. If specified, this + must be a valid port number, 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers do not need this. 
+ displayName: Shards Overrides Pods Custom Init Containers Ports Host Port + path: shards.overrides.pods.customInitContainers.ports.hostPort + - description: If specified, this must be an IANA_SVC_NAME and unique within + the pod. Each named port in a pod must have a unique name. Name for + the port that can be referred to by services. + displayName: Shards Overrides Pods Custom Init Containers Ports Name + path: shards.overrides.pods.customInitContainers.ports.name + - description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to + "TCP". + displayName: Shards Overrides Pods Custom Init Containers Ports Protocol + path: shards.overrides.pods.customInitContainers.ports.protocol + - displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Exec Command + path: shards.overrides.pods.customInitContainers.readinessProbe.exec.command + - description: Minimum consecutive failures for the probe to be considered + failed after having succeeded. Defaults to 3. Minimum value is 1. + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Failure Threshold + path: shards.overrides.pods.customInitContainers.readinessProbe.failureThreshold + - description: Port number of the gRPC service. Number must be in the range + 1 to 65535. + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Grpc Port + path: shards.overrides.pods.customInitContainers.readinessProbe.grpc.port + - description: 'Service is the name of the service to place in the gRPC + HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC.' + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Grpc Service + path: shards.overrides.pods.customInitContainers.readinessProbe.grpc.service + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Http Get Host + path: shards.overrides.pods.customInitContainers.readinessProbe.httpGet.host + - description: The header field name. This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Http Get Http Headers Name + path: shards.overrides.pods.customInitContainers.readinessProbe.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Http Get Http Headers Value + path: shards.overrides.pods.customInitContainers.readinessProbe.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Http Get Path + path: shards.overrides.pods.customInitContainers.readinessProbe.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Http Get Scheme + path: shards.overrides.pods.customInitContainers.readinessProbe.httpGet.scheme + - description: 'Number of seconds after the container has started before + liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Initial Delay Seconds + path: shards.overrides.pods.customInitContainers.readinessProbe.initialDelaySeconds + - description: How often (in seconds) to perform the probe. Default to 10 + seconds. Minimum value is 1. + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Period Seconds + path: shards.overrides.pods.customInitContainers.readinessProbe.periodSeconds + - description: Minimum consecutive successes for the probe to be considered + successful after having failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Success Threshold + path: shards.overrides.pods.customInitContainers.readinessProbe.successThreshold + - description: 'Optional: Host name to connect to, defaults to the pod IP.' + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Tcp Socket Host + path: shards.overrides.pods.customInitContainers.readinessProbe.tcpSocket.host + - description: Optional duration in seconds the pod needs to terminate gracefully + upon probe failure. The grace period is the duration in seconds after + the processes running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your process. If + this value is nil, the pod's terminationGracePeriodSeconds will be used. + Otherwise, this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). This is a beta field + and requires enabling ProbeTerminationGracePeriod feature gate. Minimum + value is 1. spec.terminationGracePeriodSeconds is used if unset. + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Termination Grace Period Seconds + path: shards.overrides.pods.customInitContainers.readinessProbe.terminationGracePeriodSeconds + - description: 'Number of seconds after which the probe times out. Defaults + to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Init Containers Readiness Probe + Timeout Seconds + path: shards.overrides.pods.customInitContainers.readinessProbe.timeoutSeconds + - description: 'Name of the resource to which this resource resize policy + applies. Supported values: cpu, memory.' + displayName: Shards Overrides Pods Custom Init Containers Resize Policy + Resource Name + path: shards.overrides.pods.customInitContainers.resizePolicy.resourceName + - description: Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + displayName: Shards Overrides Pods Custom Init Containers Resize Policy + Restart Policy + path: shards.overrides.pods.customInitContainers.resizePolicy.restartPolicy + - description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource available + inside a container. 
+ displayName: Shards Overrides Pods Custom Init Containers Resources Claims + Name + path: shards.overrides.pods.customInitContainers.resources.claims.name + - description: "Quantity is a fixed-point representation of a number. It\ + \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\ + \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\ + \n``` ::= \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::= | \ + \ ::= | . | . | .\ + \ ::= \"+\" | \"-\" ::= \ + \ | ::= | \ + \ | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in magnitude, nor may it\ + \ have more than 3 decimal places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This\ + \ may be extended in the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will remember the type\ + \ of suffix it had, and will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Shards Overrides Pods Custom Init Containers Resources Limits + path: shards.overrides.pods.customInitContainers.resources.limits + - description: "Quantity is a fixed-point representation of a number. It\ + \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\ + \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\ + \n``` ::= \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::= | \ + \ ::= | . | . | .\ + \ ::= \"+\" | \"-\" ::= \ + \ | ::= | \ + \ | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in magnitude, nor may it\ + \ have more than 3 decimal places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This\ + \ may be extended in the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will remember the type\ + \ of suffix it had, and will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Shards Overrides Pods Custom Init Containers Resources Requests + path: shards.overrides.pods.customInitContainers.resources.requests + - description: 'RestartPolicy defines the restart behavior of individual + containers in a pod. This field may only be set for init containers, + and the only allowed value is "Always". For non-init containers or when + this field is not specified, the restart behavior is defined by the + Pod''s restart policy and the container type. Setting the RestartPolicy + as "Always" for the init container will have the following effect: this + init container will be continually restarted on exit until all regular + containers have terminated. Once all regular containers have completed, + all init containers with restartPolicy "Always" will be shut down. This + lifecycle differs from normal init containers and is often referred + to as a "sidecar" container. Although this init container still starts + in the init container sequence, it does not wait for the container to + complete before proceeding to the next init container. Instead, the + next init container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + displayName: Shards Overrides Pods Custom Init Containers Restart Policy + path: shards.overrides.pods.customInitContainers.restartPolicy + - description: 'AllowPrivilegeEscalation controls whether a process can + gain more privileges than its parent process. This bool directly controls + if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows.' 
+ displayName: Shards Overrides Pods Custom Init Containers Security Context + Allow Privilege Escalation + path: shards.overrides.pods.customInitContainers.securityContext.allowPrivilegeEscalation + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Shards Overrides Pods Custom Init Containers Security Context + Capabilities Add + path: shards.overrides.pods.customInitContainers.securityContext.capabilities.add + - displayName: Shards Overrides Pods Custom Init Containers Security Context + Capabilities Drop + path: shards.overrides.pods.customInitContainers.securityContext.capabilities.drop + - description: Run container in privileged mode. Processes in privileged + containers are essentially equivalent to root on the host. Defaults + to false. Note that this field cannot be set when spec.os.name is windows. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Privileged + path: shards.overrides.pods.customInitContainers.securityContext.privileged + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults + for readonly paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot be set when + spec.os.name is windows. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Proc Mount + path: shards.overrides.pods.customInitContainers.securityContext.procMount + - description: Whether this container has a read-only root filesystem. Default + is false. Note that this field cannot be set when spec.os.name is windows. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Read Only Root Filesystem + path: shards.overrides.pods.customInitContainers.securityContext.readOnlyRootFilesystem + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The GID to run the entrypoint of the container process. Uses + runtime default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note that this field cannot be + set when spec.os.name is windows. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Run As Group + path: shards.overrides.pods.customInitContainers.securityContext.runAsGroup + - description: Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that + it does not run as UID 0 (root) and fail to start the container if it + does. If unset or false, no such validation will be performed. May also + be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Run As Non Root + path: shards.overrides.pods.customInitContainers.securityContext.runAsNonRoot + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The UID to run the entrypoint of the container process. Defaults + to user specified in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. 
+ displayName: Shards Overrides Pods Custom Init Containers Security Context + Run As User + path: shards.overrides.pods.customInitContainers.securityContext.runAsUser + - description: Level is SELinux level label that applies to the container. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Se Linux Options Level + path: shards.overrides.pods.customInitContainers.securityContext.seLinuxOptions.level + - description: Role is a SELinux role label that applies to the container. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Se Linux Options Role + path: shards.overrides.pods.customInitContainers.securityContext.seLinuxOptions.role + - description: Type is a SELinux type label that applies to the container. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Se Linux Options Type + path: shards.overrides.pods.customInitContainers.securityContext.seLinuxOptions.type + - description: User is a SELinux user label that applies to the container. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Se Linux Options User + path: shards.overrides.pods.customInitContainers.securityContext.seLinuxOptions.user + - description: localhostProfile indicates a profile defined in a file on + the node should be used. The profile must be preconfigured on the node + to work. Must be a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". Must NOT + be set for any other type. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Seccomp Profile Localhost Profile + path: shards.overrides.pods.customInitContainers.securityContext.seccompProfile.localhostProfile + - description: 'type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied.' + displayName: Shards Overrides Pods Custom Init Containers Security Context + Seccomp Profile Type + path: shards.overrides.pods.customInitContainers.securityContext.seccompProfile.type + - description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName + field. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Windows Options Gmsa Credential Spec + path: shards.overrides.pods.customInitContainers.securityContext.windowsOptions.gmsaCredentialSpec + - description: GMSACredentialSpecName is the name of the GMSA credential + spec to use. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Windows Options Gmsa Credential Spec Name + path: shards.overrides.pods.customInitContainers.securityContext.windowsOptions.gmsaCredentialSpecName + - description: HostProcess determines if a container should be run as a + 'Host Process' container. All of a Pod's containers must have the same + effective HostProcess value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. 
+ displayName: Shards Overrides Pods Custom Init Containers Security Context + Windows Options Host Process + path: shards.overrides.pods.customInitContainers.securityContext.windowsOptions.hostProcess + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The UserName in Windows to run the entrypoint of the container + process. Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext takes + precedence. + displayName: Shards Overrides Pods Custom Init Containers Security Context + Windows Options Run As User Name + path: shards.overrides.pods.customInitContainers.securityContext.windowsOptions.runAsUserName + - displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Exec Command + path: shards.overrides.pods.customInitContainers.startupProbe.exec.command + - description: Minimum consecutive failures for the probe to be considered + failed after having succeeded. Defaults to 3. Minimum value is 1. + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Failure Threshold + path: shards.overrides.pods.customInitContainers.startupProbe.failureThreshold + - description: Port number of the gRPC service. Number must be in the range + 1 to 65535. + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Grpc Port + path: shards.overrides.pods.customInitContainers.startupProbe.grpc.port + - description: 'Service is the name of the service to place in the gRPC + HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC.' + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Grpc Service + path: shards.overrides.pods.customInitContainers.startupProbe.grpc.service + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Http Get Host + path: shards.overrides.pods.customInitContainers.startupProbe.httpGet.host + - description: The header field name. This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Http Get Http Headers Name + path: shards.overrides.pods.customInitContainers.startupProbe.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Http Get Http Headers Value + path: shards.overrides.pods.customInitContainers.startupProbe.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Http Get Path + path: shards.overrides.pods.customInitContainers.startupProbe.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Http Get Scheme + path: shards.overrides.pods.customInitContainers.startupProbe.httpGet.scheme + - description: 'Number of seconds after the container has started before + liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Initial Delay Seconds + path: shards.overrides.pods.customInitContainers.startupProbe.initialDelaySeconds + - description: How often (in seconds) to perform the probe. Default to 10 + seconds. Minimum value is 1. + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Period Seconds + path: shards.overrides.pods.customInitContainers.startupProbe.periodSeconds + - description: Minimum consecutive successes for the probe to be considered + successful after having failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Success Threshold + path: shards.overrides.pods.customInitContainers.startupProbe.successThreshold + - description: 'Optional: Host name to connect to, defaults to the pod IP.' + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Tcp Socket Host + path: shards.overrides.pods.customInitContainers.startupProbe.tcpSocket.host + - description: Optional duration in seconds the pod needs to terminate gracefully + upon probe failure. The grace period is the duration in seconds after + the processes running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your process. If + this value is nil, the pod's terminationGracePeriodSeconds will be used. + Otherwise, this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). This is a beta field + and requires enabling ProbeTerminationGracePeriod feature gate. Minimum + value is 1. spec.terminationGracePeriodSeconds is used if unset. + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Termination Grace Period Seconds + path: shards.overrides.pods.customInitContainers.startupProbe.terminationGracePeriodSeconds + - description: 'Number of seconds after which the probe times out. Defaults + to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Init Containers Startup Probe + Timeout Seconds + path: shards.overrides.pods.customInitContainers.startupProbe.timeoutSeconds + - description: Whether this container should allocate a buffer for stdin + in the container runtime. If this is not set, reads from stdin in the + container will always result in EOF. Default is false. + displayName: Shards Overrides Pods Custom Init Containers Stdin + path: shards.overrides.pods.customInitContainers.stdin + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Whether the container runtime should close the stdin channel + after it has been opened by a single attach. When stdin is true the + stdin stream will remain open across multiple attach sessions. If stdinOnce + is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed and remains + closed until the container is restarted. If this flag is false, a container + processes that reads from stdin will never receive an EOF. 
Default is + false + displayName: Shards Overrides Pods Custom Init Containers Stdin Once + path: shards.overrides.pods.customInitContainers.stdinOnce + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, such + as an assertion failure message. Will be truncated by the node if greater + than 4096 bytes. The total message length across all containers will + be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + displayName: Shards Overrides Pods Custom Init Containers Termination + Message Path + path: shards.overrides.pods.customInitContainers.terminationMessagePath + - description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate the + container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination message + file is empty and the container exited with an error. The log output + is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + displayName: Shards Overrides Pods Custom Init Containers Termination + Message Policy + path: shards.overrides.pods.customInitContainers.terminationMessagePolicy + - description: Whether this container should allocate a TTY for itself, + also requires 'stdin' to be true. Default is false. + displayName: Shards Overrides Pods Custom Init Containers Tty + path: shards.overrides.pods.customInitContainers.tty + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: devicePath is the path inside of the container that the device + will be mapped to. + displayName: Shards Overrides Pods Custom Init Containers Volume Devices + Device Path + path: shards.overrides.pods.customInitContainers.volumeDevices.devicePath + - description: name must match the name of a persistentVolumeClaim in the + pod + displayName: Shards Overrides Pods Custom Init Containers Volume Devices + Name + path: shards.overrides.pods.customInitContainers.volumeDevices.name + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Shards Overrides Pods Custom Init Containers Volume Mounts + Mount Path + path: shards.overrides.pods.customInitContainers.volumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Shards Overrides Pods Custom Init Containers Volume Mounts + Mount Propagation + path: shards.overrides.pods.customInitContainers.volumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Shards Overrides Pods Custom Init Containers Volume Mounts + Name + path: shards.overrides.pods.customInitContainers.volumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. 
+ displayName: Shards Overrides Pods Custom Init Containers Volume Mounts + Read Only + path: shards.overrides.pods.customInitContainers.volumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + displayName: Shards Overrides Pods Custom Init Containers Volume Mounts + Sub Path + path: shards.overrides.pods.customInitContainers.volumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + displayName: Shards Overrides Pods Custom Init Containers Volume Mounts + Sub Path Expr + path: shards.overrides.pods.customInitContainers.volumeMounts.subPathExpr + - description: Container's working directory. If not specified, the container + runtime's default will be used, which might be configured in the container + image. Cannot be updated. + displayName: Shards Overrides Pods Custom Init Containers Working Dir + path: shards.overrides.pods.customInitContainers.workingDir + - displayName: Shards Overrides Pods Custom Containers Args + path: shards.overrides.pods.customContainers.args + - displayName: Shards Overrides Pods Custom Containers Command + path: shards.overrides.pods.customContainers.command + - description: Name of the environment variable. Must be a C_IDENTIFIER. + displayName: Shards Overrides Pods Custom Containers Env Name + path: shards.overrides.pods.customContainers.env.name + - description: 'Variable references $(VAR_NAME) are expanded using the previously + defined environment variables in the container and any service environment + variables. If a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single $, which + allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or not. Defaults + to "".' + displayName: Shards Overrides Pods Custom Containers Env Value + path: shards.overrides.pods.customContainers.env.value + - description: The key to select. + displayName: Shards Overrides Pods Custom Containers Env Value From Config + Map Key Ref Key + path: shards.overrides.pods.customContainers.env.valueFrom.configMapKeyRef.key + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Containers Env Value From Config + Map Key Ref Name + path: shards.overrides.pods.customContainers.env.valueFrom.configMapKeyRef.name + - description: Specify whether the ConfigMap or its key must be defined + displayName: Shards Overrides Pods Custom Containers Env Value From Config + Map Key Ref Optional + path: shards.overrides.pods.customContainers.env.valueFrom.configMapKeyRef.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Version of the schema the FieldPath is written in terms of, + defaults to "v1". 
+ displayName: Shards Overrides Pods Custom Containers Env Value From Field + Ref Api Version + path: shards.overrides.pods.customContainers.env.valueFrom.fieldRef.apiVersion + - description: Path of the field to select in the specified API version. + displayName: Shards Overrides Pods Custom Containers Env Value From Field + Ref Field Path + path: shards.overrides.pods.customContainers.env.valueFrom.fieldRef.fieldPath + - description: 'Container name: required for volumes, optional for env vars' + displayName: Shards Overrides Pods Custom Containers Env Value From Resource + Field Ref Container Name + path: shards.overrides.pods.customContainers.env.valueFrom.resourceFieldRef.containerName + - description: "Quantity is a fixed-point representation of a number. It\ + \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\ + \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\ + \n``` ::= \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::= | \ + \ ::= | . | . | .\ + \ ::= \"+\" | \"-\" ::= \ + \ | ::= | \ + \ | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in magnitude, nor may it\ + \ have more than 3 decimal places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This\ + \ may be extended in the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will remember the type\ + \ of suffix it had, and will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Shards Overrides Pods Custom Containers Env Value From Resource + Field Ref Divisor + path: shards.overrides.pods.customContainers.env.valueFrom.resourceFieldRef.divisor + - description: 'Required: resource to select' + displayName: Shards Overrides Pods Custom Containers Env Value From Resource + Field Ref Resource + path: shards.overrides.pods.customContainers.env.valueFrom.resourceFieldRef.resource + - description: The key of the secret to select from. Must be a valid secret + key. 
+ displayName: Shards Overrides Pods Custom Containers Env Value From Secret + Key Ref Key + path: shards.overrides.pods.customContainers.env.valueFrom.secretKeyRef.key + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Containers Env Value From Secret + Key Ref Name + path: shards.overrides.pods.customContainers.env.valueFrom.secretKeyRef.name + - description: Specify whether the Secret or its key must be defined + displayName: Shards Overrides Pods Custom Containers Env Value From Secret + Key Ref Optional + path: shards.overrides.pods.customContainers.env.valueFrom.secretKeyRef.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Containers Env From Config Map + Ref Name + path: shards.overrides.pods.customContainers.envFrom.configMapRef.name + - description: Specify whether the ConfigMap must be defined + displayName: Shards Overrides Pods Custom Containers Env From Config Map + Ref Optional + path: shards.overrides.pods.customContainers.envFrom.configMapRef.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: An optional identifier to prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + displayName: Shards Overrides Pods Custom Containers Env From Prefix + path: shards.overrides.pods.customContainers.envFrom.prefix + - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + displayName: Shards Overrides Pods Custom Containers Env From Secret Ref + Name + path: shards.overrides.pods.customContainers.envFrom.secretRef.name + - description: Specify whether the Secret must be defined + displayName: Shards Overrides Pods Custom Containers Env From Secret Ref + Optional + path: shards.overrides.pods.customContainers.envFrom.secretRef.optional + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default + or override container images in workload controllers like Deployments + and StatefulSets.' + displayName: Shards Overrides Pods Custom Containers Image + path: shards.overrides.pods.customContainers.image + - description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults + to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + displayName: Shards Overrides Pods Custom Containers Image Pull Policy + path: shards.overrides.pods.customContainers.imagePullPolicy + - displayName: Shards Overrides Pods Custom Containers Lifecycle Post Start + Exec Command + path: shards.overrides.pods.customContainers.lifecycle.postStart.exec.command + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Containers Lifecycle Post Start + Http Get Host + path: shards.overrides.pods.customContainers.lifecycle.postStart.httpGet.host + - description: The header field name. 
This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Containers Lifecycle Post Start + Http Get Http Headers Name + path: shards.overrides.pods.customContainers.lifecycle.postStart.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Containers Lifecycle Post Start + Http Get Http Headers Value + path: shards.overrides.pods.customContainers.lifecycle.postStart.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. + displayName: Shards Overrides Pods Custom Containers Lifecycle Post Start + Http Get Path + path: shards.overrides.pods.customContainers.lifecycle.postStart.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Containers Lifecycle Post Start + Http Get Scheme + path: shards.overrides.pods.customContainers.lifecycle.postStart.httpGet.scheme + - description: Seconds is the number of seconds to sleep. + displayName: Shards Overrides Pods Custom Containers Lifecycle Post Start + Sleep Seconds + path: shards.overrides.pods.customContainers.lifecycle.postStart.sleep.seconds + - description: 'Optional: Host name to connect to, defaults to the pod IP.' + displayName: Shards Overrides Pods Custom Containers Lifecycle Post Start + Tcp Socket Host + path: shards.overrides.pods.customContainers.lifecycle.postStart.tcpSocket.host + - displayName: Shards Overrides Pods Custom Containers Lifecycle Pre Stop + Exec Command + path: shards.overrides.pods.customContainers.lifecycle.preStop.exec.command + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Containers Lifecycle Pre Stop + Http Get Host + path: shards.overrides.pods.customContainers.lifecycle.preStop.httpGet.host + - description: The header field name. This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Containers Lifecycle Pre Stop + Http Get Http Headers Name + path: shards.overrides.pods.customContainers.lifecycle.preStop.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Containers Lifecycle Pre Stop + Http Get Http Headers Value + path: shards.overrides.pods.customContainers.lifecycle.preStop.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. + displayName: Shards Overrides Pods Custom Containers Lifecycle Pre Stop + Http Get Path + path: shards.overrides.pods.customContainers.lifecycle.preStop.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Containers Lifecycle Pre Stop + Http Get Scheme + path: shards.overrides.pods.customContainers.lifecycle.preStop.httpGet.scheme + - description: Seconds is the number of seconds to sleep. + displayName: Shards Overrides Pods Custom Containers Lifecycle Pre Stop + Sleep Seconds + path: shards.overrides.pods.customContainers.lifecycle.preStop.sleep.seconds + - description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ displayName: Shards Overrides Pods Custom Containers Lifecycle Pre Stop + Tcp Socket Host + path: shards.overrides.pods.customContainers.lifecycle.preStop.tcpSocket.host + - displayName: Shards Overrides Pods Custom Containers Liveness Probe Exec + Command + path: shards.overrides.pods.customContainers.livenessProbe.exec.command + - description: Minimum consecutive failures for the probe to be considered + failed after having succeeded. Defaults to 3. Minimum value is 1. + displayName: Shards Overrides Pods Custom Containers Liveness Probe Failure + Threshold + path: shards.overrides.pods.customContainers.livenessProbe.failureThreshold + - description: Port number of the gRPC service. Number must be in the range + 1 to 65535. + displayName: Shards Overrides Pods Custom Containers Liveness Probe Grpc + Port + path: shards.overrides.pods.customContainers.livenessProbe.grpc.port + - description: 'Service is the name of the service to place in the gRPC + HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC.' + displayName: Shards Overrides Pods Custom Containers Liveness Probe Grpc + Service + path: shards.overrides.pods.customContainers.livenessProbe.grpc.service + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Containers Liveness Probe Http + Get Host + path: shards.overrides.pods.customContainers.livenessProbe.httpGet.host + - description: The header field name. This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Containers Liveness Probe Http + Get Http Headers Name + path: shards.overrides.pods.customContainers.livenessProbe.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Containers Liveness Probe Http + Get Http Headers Value + path: shards.overrides.pods.customContainers.livenessProbe.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. + displayName: Shards Overrides Pods Custom Containers Liveness Probe Http + Get Path + path: shards.overrides.pods.customContainers.livenessProbe.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Containers Liveness Probe Http + Get Scheme + path: shards.overrides.pods.customContainers.livenessProbe.httpGet.scheme + - description: 'Number of seconds after the container has started before + liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Containers Liveness Probe Initial + Delay Seconds + path: shards.overrides.pods.customContainers.livenessProbe.initialDelaySeconds + - description: How often (in seconds) to perform the probe. Default to 10 + seconds. Minimum value is 1. + displayName: Shards Overrides Pods Custom Containers Liveness Probe Period + Seconds + path: shards.overrides.pods.customContainers.livenessProbe.periodSeconds + - description: Minimum consecutive successes for the probe to be considered + successful after having failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. 
+ displayName: Shards Overrides Pods Custom Containers Liveness Probe Success + Threshold + path: shards.overrides.pods.customContainers.livenessProbe.successThreshold + - description: 'Optional: Host name to connect to, defaults to the pod IP.' + displayName: Shards Overrides Pods Custom Containers Liveness Probe Tcp + Socket Host + path: shards.overrides.pods.customContainers.livenessProbe.tcpSocket.host + - description: Optional duration in seconds the pod needs to terminate gracefully + upon probe failure. The grace period is the duration in seconds after + the processes running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your process. If + this value is nil, the pod's terminationGracePeriodSeconds will be used. + Otherwise, this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). This is a beta field + and requires enabling ProbeTerminationGracePeriod feature gate. Minimum + value is 1. spec.terminationGracePeriodSeconds is used if unset. + displayName: Shards Overrides Pods Custom Containers Liveness Probe Termination + Grace Period Seconds + path: shards.overrides.pods.customContainers.livenessProbe.terminationGracePeriodSeconds + - description: 'Number of seconds after which the probe times out. Defaults + to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Containers Liveness Probe Timeout + Seconds + path: shards.overrides.pods.customContainers.livenessProbe.timeoutSeconds + - description: Name of the container specified as a DNS_LABEL. Each container + in a pod must have a unique name (DNS_LABEL). Cannot be updated. + displayName: Shards Overrides Pods Custom Containers Name + path: shards.overrides.pods.customContainers.name + - description: Number of port to expose on the pod's IP address. This must + be a valid port number, 0 < x < 65536. + displayName: Shards Overrides Pods Custom Containers Ports Container Port + path: shards.overrides.pods.customContainers.ports.containerPort + - description: What host IP to bind the external port to. + displayName: Shards Overrides Pods Custom Containers Ports Host IP + path: shards.overrides.pods.customContainers.ports.hostIP + - description: Number of port to expose on the host. If specified, this + must be a valid port number, 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers do not need this. + displayName: Shards Overrides Pods Custom Containers Ports Host Port + path: shards.overrides.pods.customContainers.ports.hostPort + - description: If specified, this must be an IANA_SVC_NAME and unique within + the pod. Each named port in a pod must have a unique name. Name for + the port that can be referred to by services. + displayName: Shards Overrides Pods Custom Containers Ports Name + path: shards.overrides.pods.customContainers.ports.name + - description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to + "TCP". 
+ displayName: Shards Overrides Pods Custom Containers Ports Protocol + path: shards.overrides.pods.customContainers.ports.protocol + - displayName: Shards Overrides Pods Custom Containers Readiness Probe Exec + Command + path: shards.overrides.pods.customContainers.readinessProbe.exec.command + - description: Minimum consecutive failures for the probe to be considered + failed after having succeeded. Defaults to 3. Minimum value is 1. + displayName: Shards Overrides Pods Custom Containers Readiness Probe Failure + Threshold + path: shards.overrides.pods.customContainers.readinessProbe.failureThreshold + - description: Port number of the gRPC service. Number must be in the range + 1 to 65535. + displayName: Shards Overrides Pods Custom Containers Readiness Probe Grpc + Port + path: shards.overrides.pods.customContainers.readinessProbe.grpc.port + - description: 'Service is the name of the service to place in the gRPC + HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC.' + displayName: Shards Overrides Pods Custom Containers Readiness Probe Grpc + Service + path: shards.overrides.pods.customContainers.readinessProbe.grpc.service + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Containers Readiness Probe Http + Get Host + path: shards.overrides.pods.customContainers.readinessProbe.httpGet.host + - description: The header field name. This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Containers Readiness Probe Http + Get Http Headers Name + path: shards.overrides.pods.customContainers.readinessProbe.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Containers Readiness Probe Http + Get Http Headers Value + path: shards.overrides.pods.customContainers.readinessProbe.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. + displayName: Shards Overrides Pods Custom Containers Readiness Probe Http + Get Path + path: shards.overrides.pods.customContainers.readinessProbe.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Containers Readiness Probe Http + Get Scheme + path: shards.overrides.pods.customContainers.readinessProbe.httpGet.scheme + - description: 'Number of seconds after the container has started before + liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Containers Readiness Probe Initial + Delay Seconds + path: shards.overrides.pods.customContainers.readinessProbe.initialDelaySeconds + - description: How often (in seconds) to perform the probe. Default to 10 + seconds. Minimum value is 1. + displayName: Shards Overrides Pods Custom Containers Readiness Probe Period + Seconds + path: shards.overrides.pods.customContainers.readinessProbe.periodSeconds + - description: Minimum consecutive successes for the probe to be considered + successful after having failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. 
+ displayName: Shards Overrides Pods Custom Containers Readiness Probe Success
+ Threshold
+ path: shards.overrides.pods.customContainers.readinessProbe.successThreshold
+ - description: 'Optional: Host name to connect to, defaults to the pod IP.'
+ displayName: Shards Overrides Pods Custom Containers Readiness Probe Tcp
+ Socket Host
+ path: shards.overrides.pods.customContainers.readinessProbe.tcpSocket.host
+ - description: Optional duration in seconds the pod needs to terminate gracefully
+ upon probe failure. The grace period is the duration in seconds after
+ the processes running in the pod are sent a termination signal and the
+ time when the processes are forcibly halted with a kill signal. Set
+ this value longer than the expected cleanup time for your process. If
+ this value is nil, the pod's terminationGracePeriodSeconds will be used.
+ Otherwise, this value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately
+ via the kill signal (no opportunity to shut down). This is a beta field
+ and requires enabling ProbeTerminationGracePeriod feature gate. Minimum
+ value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ displayName: Shards Overrides Pods Custom Containers Readiness Probe Termination
+ Grace Period Seconds
+ path: shards.overrides.pods.customContainers.readinessProbe.terminationGracePeriodSeconds
+ - description: 'Number of seconds after which the probe times out. Defaults
+ to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ displayName: Shards Overrides Pods Custom Containers Readiness Probe Timeout
+ Seconds
+ path: shards.overrides.pods.customContainers.readinessProbe.timeoutSeconds
+ - description: 'Name of the resource to which this resource resize policy
+ applies. Supported values: cpu, memory.'
+ displayName: Shards Overrides Pods Custom Containers Resize Policy Resource
+ Name
+ path: shards.overrides.pods.customContainers.resizePolicy.resourceName
+ - description: Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ displayName: Shards Overrides Pods Custom Containers Resize Policy Restart
+ Policy
+ path: shards.overrides.pods.customContainers.resizePolicy.restartPolicy
+ - description: Name must match the name of one entry in pod.spec.resourceClaims
+ of the Pod where this field is used. It makes that resource available
+ inside a container.
+ displayName: Shards Overrides Pods Custom Containers Resources Claims
+ Name
+ path: shards.overrides.pods.customContainers.resources.claims.name
+ - description: "Quantity is a fixed-point representation of a number. It\
+ \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\
+ \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\
+ \n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix>\
+ \ may be empty, from the \"\" case in <decimalSI>.)\n\n<digit>\
+ \ ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits>\
+ \ <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>\
+ \ <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number>\
+ \ | <sign><number> <suffix> ::= <binarySI> | <decimalExponent>\
+ \ | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\
+ \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\
+ \n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\
+ \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\
+ \n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber>\
+ \ ```\n\nNo matter which of the three exponent forms is used, no quantity\
+ \ may represent a number greater than 2^63-1 in magnitude, nor may it\
+ \ have more than 3 decimal places. Numbers larger or more precise will\
+ \ be capped or rounded up. (E.g.: 0.1m will be rounded up to 1m.) This\
+ \ may be extended in the future if we require larger or smaller quantities.\n\
+ \nWhen a Quantity is parsed from a string, it will remember the type\
+ \ of suffix it had, and will use the same type again when it is serialized.\n\
+ \nBefore serializing, Quantity will be put in \"canonical form\". This\
+ \ means that Exponent/suffix will be adjusted up or down (with a corresponding\
+ \ increase or decrease in Mantissa) such that:\n\n- No precision is\
+ \ lost - No fractional digits will be emitted - The exponent (or suffix)\
+ \ is as large as possible.\n\nThe sign will be omitted unless the number\
+ \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\
+ \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\
+ \ will NEVER be internally represented by a floating point number. That\
+ \ is the whole point of this exercise.\n\nNon-canonical values will\
+ \ still parse as long as they are well formed, but will be re-emitted\
+ \ in their canonical form. (So always use canonical form, or don't diff.)\n\
+ \nThis format is intended to make it difficult to use these numbers\
+ \ without writing some sort of special handling code in the hopes that\
+ \ that will cause implementors to also use a fixed point implementation."
+ displayName: Shards Overrides Pods Custom Containers Resources Limits
+ path: shards.overrides.pods.customContainers.resources.limits
+ - description: "Quantity is a fixed-point representation of a number. It\
+ \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\
+ \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\
+ \n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix>\
+ \ may be empty, from the \"\" case in <decimalSI>.)\n\n<digit>\
+ \ ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits>\
+ \ <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>\
+ \ <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number>\
+ \ | <sign><number> <suffix> ::= <binarySI> | <decimalExponent>\
+ \ | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\
+ \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\
+ \n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\
+ \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\
+ \n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber>\
+ \ ```\n\nNo matter which of the three exponent forms is used, no quantity\
+ \ may represent a number greater than 2^63-1 in magnitude, nor may it\
+ \ have more than 3 decimal places. Numbers larger or more precise will\
+ \ be capped or rounded up. (E.g.: 0.1m will be rounded up to 1m.) This\
+ \ may be extended in the future if we require larger or smaller quantities.\n\
+ \nWhen a Quantity is parsed from a string, it will remember the type\
+ \ of suffix it had, and will use the same type again when it is serialized.\n\
+ \nBefore serializing, Quantity will be put in \"canonical form\".
This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Shards Overrides Pods Custom Containers Resources Requests + path: shards.overrides.pods.customContainers.resources.requests + - description: 'RestartPolicy defines the restart behavior of individual + containers in a pod. This field may only be set for init containers, + and the only allowed value is "Always". For non-init containers or when + this field is not specified, the restart behavior is defined by the + Pod''s restart policy and the container type. Setting the RestartPolicy + as "Always" for the init container will have the following effect: this + init container will be continually restarted on exit until all regular + containers have terminated. Once all regular containers have completed, + all init containers with restartPolicy "Always" will be shut down. This + lifecycle differs from normal init containers and is often referred + to as a "sidecar" container. Although this init container still starts + in the init container sequence, it does not wait for the container to + complete before proceeding to the next init container. Instead, the + next init container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + displayName: Shards Overrides Pods Custom Containers Restart Policy + path: shards.overrides.pods.customContainers.restartPolicy + - description: 'AllowPrivilegeEscalation controls whether a process can + gain more privileges than its parent process. This bool directly controls + if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows.' + displayName: Shards Overrides Pods Custom Containers Security Context + Allow Privilege Escalation + path: shards.overrides.pods.customContainers.securityContext.allowPrivilegeEscalation + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Shards Overrides Pods Custom Containers Security Context + Capabilities Add + path: shards.overrides.pods.customContainers.securityContext.capabilities.add + - displayName: Shards Overrides Pods Custom Containers Security Context + Capabilities Drop + path: shards.overrides.pods.customContainers.securityContext.capabilities.drop + - description: Run container in privileged mode. Processes in privileged + containers are essentially equivalent to root on the host. Defaults + to false. Note that this field cannot be set when spec.os.name is windows. 
+ displayName: Shards Overrides Pods Custom Containers Security Context + Privileged + path: shards.overrides.pods.customContainers.securityContext.privileged + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults + for readonly paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot be set when + spec.os.name is windows. + displayName: Shards Overrides Pods Custom Containers Security Context + Proc Mount + path: shards.overrides.pods.customContainers.securityContext.procMount + - description: Whether this container has a read-only root filesystem. Default + is false. Note that this field cannot be set when spec.os.name is windows. + displayName: Shards Overrides Pods Custom Containers Security Context + Read Only Root Filesystem + path: shards.overrides.pods.customContainers.securityContext.readOnlyRootFilesystem + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The GID to run the entrypoint of the container process. Uses + runtime default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note that this field cannot be + set when spec.os.name is windows. + displayName: Shards Overrides Pods Custom Containers Security Context + Run As Group + path: shards.overrides.pods.customContainers.securityContext.runAsGroup + - description: Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that + it does not run as UID 0 (root) and fail to start the container if it + does. If unset or false, no such validation will be performed. May also + be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + displayName: Shards Overrides Pods Custom Containers Security Context + Run As Non Root + path: shards.overrides.pods.customContainers.securityContext.runAsNonRoot + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The UID to run the entrypoint of the container process. Defaults + to user specified in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + displayName: Shards Overrides Pods Custom Containers Security Context + Run As User + path: shards.overrides.pods.customContainers.securityContext.runAsUser + - description: Level is SELinux level label that applies to the container. + displayName: Shards Overrides Pods Custom Containers Security Context + Se Linux Options Level + path: shards.overrides.pods.customContainers.securityContext.seLinuxOptions.level + - description: Role is a SELinux role label that applies to the container. + displayName: Shards Overrides Pods Custom Containers Security Context + Se Linux Options Role + path: shards.overrides.pods.customContainers.securityContext.seLinuxOptions.role + - description: Type is a SELinux type label that applies to the container. 
+ displayName: Shards Overrides Pods Custom Containers Security Context + Se Linux Options Type + path: shards.overrides.pods.customContainers.securityContext.seLinuxOptions.type + - description: User is a SELinux user label that applies to the container. + displayName: Shards Overrides Pods Custom Containers Security Context + Se Linux Options User + path: shards.overrides.pods.customContainers.securityContext.seLinuxOptions.user + - description: localhostProfile indicates a profile defined in a file on + the node should be used. The profile must be preconfigured on the node + to work. Must be a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". Must NOT + be set for any other type. + displayName: Shards Overrides Pods Custom Containers Security Context + Seccomp Profile Localhost Profile + path: shards.overrides.pods.customContainers.securityContext.seccompProfile.localhostProfile + - description: 'type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied.' + displayName: Shards Overrides Pods Custom Containers Security Context + Seccomp Profile Type + path: shards.overrides.pods.customContainers.securityContext.seccompProfile.type + - description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName + field. + displayName: Shards Overrides Pods Custom Containers Security Context + Windows Options Gmsa Credential Spec + path: shards.overrides.pods.customContainers.securityContext.windowsOptions.gmsaCredentialSpec + - description: GMSACredentialSpecName is the name of the GMSA credential + spec to use. + displayName: Shards Overrides Pods Custom Containers Security Context + Windows Options Gmsa Credential Spec Name + path: shards.overrides.pods.customContainers.securityContext.windowsOptions.gmsaCredentialSpecName + - description: HostProcess determines if a container should be run as a + 'Host Process' container. All of a Pod's containers must have the same + effective HostProcess value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + displayName: Shards Overrides Pods Custom Containers Security Context + Windows Options Host Process + path: shards.overrides.pods.customContainers.securityContext.windowsOptions.hostProcess + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The UserName in Windows to run the entrypoint of the container + process. Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext takes + precedence. 
+ displayName: Shards Overrides Pods Custom Containers Security Context + Windows Options Run As User Name + path: shards.overrides.pods.customContainers.securityContext.windowsOptions.runAsUserName + - displayName: Shards Overrides Pods Custom Containers Startup Probe Exec + Command + path: shards.overrides.pods.customContainers.startupProbe.exec.command + - description: Minimum consecutive failures for the probe to be considered + failed after having succeeded. Defaults to 3. Minimum value is 1. + displayName: Shards Overrides Pods Custom Containers Startup Probe Failure + Threshold + path: shards.overrides.pods.customContainers.startupProbe.failureThreshold + - description: Port number of the gRPC service. Number must be in the range + 1 to 65535. + displayName: Shards Overrides Pods Custom Containers Startup Probe Grpc + Port + path: shards.overrides.pods.customContainers.startupProbe.grpc.port + - description: 'Service is the name of the service to place in the gRPC + HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC.' + displayName: Shards Overrides Pods Custom Containers Startup Probe Grpc + Service + path: shards.overrides.pods.customContainers.startupProbe.grpc.service + - description: Host name to connect to, defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + displayName: Shards Overrides Pods Custom Containers Startup Probe Http + Get Host + path: shards.overrides.pods.customContainers.startupProbe.httpGet.host + - description: The header field name. This will be canonicalized upon output, + so case-variant names will be understood as the same header. + displayName: Shards Overrides Pods Custom Containers Startup Probe Http + Get Http Headers Name + path: shards.overrides.pods.customContainers.startupProbe.httpGet.httpHeaders.name + - description: The header field value + displayName: Shards Overrides Pods Custom Containers Startup Probe Http + Get Http Headers Value + path: shards.overrides.pods.customContainers.startupProbe.httpGet.httpHeaders.value + - description: Path to access on the HTTP server. + displayName: Shards Overrides Pods Custom Containers Startup Probe Http + Get Path + path: shards.overrides.pods.customContainers.startupProbe.httpGet.path + - description: Scheme to use for connecting to the host. Defaults to HTTP. + displayName: Shards Overrides Pods Custom Containers Startup Probe Http + Get Scheme + path: shards.overrides.pods.customContainers.startupProbe.httpGet.scheme + - description: 'Number of seconds after the container has started before + liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Containers Startup Probe Initial + Delay Seconds + path: shards.overrides.pods.customContainers.startupProbe.initialDelaySeconds + - description: How often (in seconds) to perform the probe. Default to 10 + seconds. Minimum value is 1. + displayName: Shards Overrides Pods Custom Containers Startup Probe Period + Seconds + path: shards.overrides.pods.customContainers.startupProbe.periodSeconds + - description: Minimum consecutive successes for the probe to be considered + successful after having failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. 
+ displayName: Shards Overrides Pods Custom Containers Startup Probe Success + Threshold + path: shards.overrides.pods.customContainers.startupProbe.successThreshold + - description: 'Optional: Host name to connect to, defaults to the pod IP.' + displayName: Shards Overrides Pods Custom Containers Startup Probe Tcp + Socket Host + path: shards.overrides.pods.customContainers.startupProbe.tcpSocket.host + - description: Optional duration in seconds the pod needs to terminate gracefully + upon probe failure. The grace period is the duration in seconds after + the processes running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your process. If + this value is nil, the pod's terminationGracePeriodSeconds will be used. + Otherwise, this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). This is a beta field + and requires enabling ProbeTerminationGracePeriod feature gate. Minimum + value is 1. spec.terminationGracePeriodSeconds is used if unset. + displayName: Shards Overrides Pods Custom Containers Startup Probe Termination + Grace Period Seconds + path: shards.overrides.pods.customContainers.startupProbe.terminationGracePeriodSeconds + - description: 'Number of seconds after which the probe times out. Defaults + to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + displayName: Shards Overrides Pods Custom Containers Startup Probe Timeout + Seconds + path: shards.overrides.pods.customContainers.startupProbe.timeoutSeconds + - description: Whether this container should allocate a buffer for stdin + in the container runtime. If this is not set, reads from stdin in the + container will always result in EOF. Default is false. + displayName: Shards Overrides Pods Custom Containers Stdin + path: shards.overrides.pods.customContainers.stdin + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Whether the container runtime should close the stdin channel + after it has been opened by a single attach. When stdin is true the + stdin stream will remain open across multiple attach sessions. If stdinOnce + is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed and remains + closed until the container is restarted. If this flag is false, a container + processes that reads from stdin will never receive an EOF. Default is + false + displayName: Shards Overrides Pods Custom Containers Stdin Once + path: shards.overrides.pods.customContainers.stdinOnce + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, such + as an assertion failure message. Will be truncated by the node if greater + than 4096 bytes. The total message length across all containers will + be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' 
+ displayName: Shards Overrides Pods Custom Containers Termination Message + Path + path: shards.overrides.pods.customContainers.terminationMessagePath + - description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate the + container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination message + file is empty and the container exited with an error. The log output + is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + displayName: Shards Overrides Pods Custom Containers Termination Message + Policy + path: shards.overrides.pods.customContainers.terminationMessagePolicy + - description: Whether this container should allocate a TTY for itself, + also requires 'stdin' to be true. Default is false. + displayName: Shards Overrides Pods Custom Containers Tty + path: shards.overrides.pods.customContainers.tty + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: devicePath is the path inside of the container that the device + will be mapped to. + displayName: Shards Overrides Pods Custom Containers Volume Devices Device + Path + path: shards.overrides.pods.customContainers.volumeDevices.devicePath + - description: name must match the name of a persistentVolumeClaim in the + pod + displayName: Shards Overrides Pods Custom Containers Volume Devices Name + path: shards.overrides.pods.customContainers.volumeDevices.name + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Shards Overrides Pods Custom Containers Volume Mounts Mount + Path + path: shards.overrides.pods.customContainers.volumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Shards Overrides Pods Custom Containers Volume Mounts Mount + Propagation + path: shards.overrides.pods.customContainers.volumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Shards Overrides Pods Custom Containers Volume Mounts Name + path: shards.overrides.pods.customContainers.volumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. + displayName: Shards Overrides Pods Custom Containers Volume Mounts Read + Only + path: shards.overrides.pods.customContainers.volumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + displayName: Shards Overrides Pods Custom Containers Volume Mounts Sub + Path + path: shards.overrides.pods.customContainers.volumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + displayName: Shards Overrides Pods Custom Containers Volume Mounts Sub + Path Expr + path: shards.overrides.pods.customContainers.volumeMounts.subPathExpr + - description: Container's working directory. 
If not specified, the container + runtime's default will be used, which might be configured in the container + image. Cannot be updated. + displayName: Shards Overrides Pods Custom Containers Working Dir + path: shards.overrides.pods.customContainers.workingDir + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Shards Overrides Pods Custom Volume Mounts Mount Path + path: shards.overrides.pods.customVolumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Shards Overrides Pods Custom Volume Mounts Mount Propagation + path: shards.overrides.pods.customVolumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Shards Overrides Pods Custom Volume Mounts Name + path: shards.overrides.pods.customVolumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. + displayName: Shards Overrides Pods Custom Volume Mounts Read Only + path: shards.overrides.pods.customVolumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + displayName: Shards Overrides Pods Custom Volume Mounts Sub Path + path: shards.overrides.pods.customVolumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + displayName: Shards Overrides Pods Custom Volume Mounts Sub Path Expr + path: shards.overrides.pods.customVolumeMounts.subPathExpr + - description: Path within the container at which the volume should be mounted. Must + not contain ':'. + displayName: Shards Overrides Pods Custom Init Volume Mounts Mount Path + path: shards.overrides.pods.customInitVolumeMounts.mountPath + - description: mountPropagation determines how mounts are propagated from + the host to container and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + displayName: Shards Overrides Pods Custom Init Volume Mounts Mount Propagation + path: shards.overrides.pods.customInitVolumeMounts.mountPropagation + - description: This must match the Name of a Volume. + displayName: Shards Overrides Pods Custom Init Volume Mounts Name + path: shards.overrides.pods.customInitVolumeMounts.name + - description: Mounted read-only if true, read-write otherwise (false or + unspecified). Defaults to false. + displayName: Shards Overrides Pods Custom Init Volume Mounts Read Only + path: shards.overrides.pods.customInitVolumeMounts.readOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Path within the volume from which the container's volume + should be mounted. Defaults to "" (volume's root). + displayName: Shards Overrides Pods Custom Init Volume Mounts Sub Path + path: shards.overrides.pods.customInitVolumeMounts.subPath + - description: Expanded path within the volume from which the container's + volume should be mounted. 
Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + displayName: Shards Overrides Pods Custom Init Volume Mounts Sub Path + Expr + path: shards.overrides.pods.customInitVolumeMounts.subPathExpr + - description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the cluster. It must exist. When not set, a default Postgres + config, for the major version selected, is used. + + ' + displayName: Shards Overrides Configurations SGPostgresConfig + path: shards.overrides.configurations.sgPostgresConfig + - description: 'Name of the [SGPoolingConfig](https://stackgres.io/doc/latest/reference/crd/sgpoolconfig) + used for this cluster. Each pod contains a sidecar with a connection + pooler (currently: [PgBouncer](https://www.pgbouncer.org/)). The connection + pooler is implemented as a sidecar. + + + If not set, a default configuration will be used. Disabling connection + pooling altogether is possible if the disableConnectionPooling property + of the pods object is set to true. + + ' + displayName: Shards Overrides Configurations SGPoolingConfig + path: shards.overrides.configurations.sgPoolingConfig + - description: "The replication mode applied to the whole cluster.\nPossible\ + \ values are:\n* `async` (default)\n* `sync`\n* `strict-sync`\n* `sync-all`\n\ + * `strict-sync-all`\n\n**async**\n\nWhen in asynchronous mode the cluster\ + \ is allowed to lose some committed transactions.\n When the primary\ + \ server fails or becomes unavailable for any other reason a sufficiently\ + \ healthy standby\n will automatically be promoted to primary. Any\ + \ transactions that have not been replicated to that standby\n remain\ + \ in a \"forked timeline\" on the primary, and are effectively unrecoverable\ + \ (the data is still there,\n but recovering it requires a manual recovery\ + \ effort by data recovery specialists).\n\n**sync**\n\nWhen in synchronous\ + \ mode a standby will not be promoted unless it is certain that the\ + \ standby contains all\n transactions that may have returned a successful\ + \ commit status to client (clients can change the behavior\n per transaction\ + \ using PostgreSQL’s `synchronous_commit` setting. Transactions with\ + \ `synchronous_commit`\n values of `off` and `local` may be lost on\ + \ fail over, but will not be blocked by replication delays). This\n\ + \ means that the system may be unavailable for writes even though some\ + \ servers are available. System\n administrators can still use manual\ + \ failover commands to promote a standby even if it results in transaction\n\ + \ loss.\n\nSynchronous mode does not guarantee multi node durability\ + \ of commits under all circumstances. When no suitable\n standby is\ + \ available, primary server will still accept writes, but does not guarantee\ + \ their replication. When\n the primary fails in this mode no standby\ + \ will be promoted. When the host that used to be the primary comes\n\ + \ back it will get promoted automatically, unless system administrator\ + \ performed a manual failover. This behavior\n makes synchronous mode\ + \ usable with 2 node clusters.\n\nWhen synchronous mode is used and\ + \ a standby crashes, commits will block until the primary is switched\ + \ to standalone\n mode. Manually shutting down or restarting a standby\ + \ will not cause a commit service interruption. 
Standby will\n signal\
+ \ the primary to release itself from synchronous standby duties before\
+ \ PostgreSQL shutdown is initiated.\n\n**strict-sync**\n\nWhen it is\
+ \ absolutely necessary to guarantee that each write is stored durably\
+ \ on at least two nodes, use the strict\n synchronous mode. This mode\
+ \ prevents synchronous replication to be switched off on the primary\
+ \ when no synchronous\n standby candidates are available. As a downside,\
+ \ the primary will not be available for writes (unless the Postgres\n\
+ \ transaction explicitly turns off `synchronous_mode` parameter), blocking\
+ \ all client write requests until at least one\n synchronous replica\
+ \ comes up.\n\n**Note**: Because of the way synchronous replication\
+ \ is implemented in PostgreSQL it is still possible to lose\n transactions\
+ \ even when using strict synchronous mode. If the PostgreSQL backend\
+ \ is cancelled while waiting to acknowledge\n replication (as a result\
+ \ of packet cancellation due to client timeout or backend failure) transaction\
+ \ changes become\n visible for other backends. Such changes are not\
+ \ yet replicated and may be lost in case of standby promotion.\n\n**sync-all**\n\
+ \nThe same as `sync` but `syncInstances` is ignored and the number of\
+ \ synchronous instances is equal to the total number\n of instances\
+ \ less one.\n\n**strict-sync-all**\n\nThe same as `strict-sync` but\
+ \ `syncInstances` is ignored and the number of synchronous instances\
+ \ is equal to the total number\n of instances less one.\n"
+ displayName: Shards Overrides Replication Mode
+ path: shards.overrides.replication.mode
+ - description: "Number of synchronous standby instances. Must be less than\
+ \ the total number of instances. It is set to 1 by default.\n Only\
+ \ settable if mode is `sync` or `strict-sync`.\n"
+ displayName: Shards Overrides Replication Sync Instances
+ path: shards.overrides.replication.syncInstances
+ - description: "Allows to specify how the replicas are initialized.\n\nPossible\
+ \ values are:\n\n* `FromPrimary`: When this mode is used replicas will\
+ \ be always created from the primary using `pg_basebackup`.\n* `FromReplica`:\
+ \ When this mode is used replicas will be created from another existing\
+ \ replica using\n `pg_basebackup`. Falls back to `FromPrimary` if there's\
+ \ no replica or it fails.\n* `FromExistingBackup`: When this mode is\
+ \ used replicas will be created from an existing SGBackup. If `backupNewerThan`\
+ \ is set\n the SGBackup must be newer than its value. When this mode\
+ \ fails to restore an SGBackup it will try with a previous one (if exists).\n\
+ \ Falls back to `FromReplica` if there's no backup left or it fails.\n\
+ * `FromNewlyCreatedBackup`: When this mode is used replicas will be\
+ \ created from a newly created SGBackup.\n Falls back to `FromExistingBackup`\
+ \ if `backupNewerThan` is set and exists a recent backup newer than\
+ \ its value or it fails.\n"
+ displayName: Shards Overrides Replication Initialization Mode
+ path: shards.overrides.replication.initialization.mode
+ - description: "An ISO 8601 duration in the format `PnDTnHnMn.nS`, that\
+ \ specifies how old an SGBackup has to be in order to be selected\n\
+ \ to initialize a replica.\n\nWhen `FromExistingBackup` mode is set\
+ \ this field restricts the selection of SGBackup to be used for recovery\
+ \ newer than the\n specified value. 
\n\nWhen `FromNewlyCreatedBackup`\ + \ mode is set this field skip the creation SGBackup to be used for recovery\ + \ if one newer than\n the specified value exists. \n" + displayName: Shards Overrides Replication Initialization Backup Newer + Than + path: shards.overrides.replication.initialization.backupNewerThan + - description: 'Maximum storage upload bandwidth used when storing a backup. + In bytes (per second). + + ' + displayName: Shards Overrides Replication Initialization Backup Restore + Performance Max Network Bandwidth + path: shards.overrides.replication.initialization.backupRestorePerformance.maxNetworkBandwidth + - description: 'Maximum disk read I/O when performing a backup. In bytes + (per second). + + ' + displayName: Shards Overrides Replication Initialization Backup Restore + Performance Max Disk Bandwidth + path: shards.overrides.replication.initialization.backupRestorePerformance.maxDiskBandwidth + - description: 'Backup storage may use several concurrent streams to read + the data. This parameter configures the number of parallel streams to + use. By default, it''s set to the minimum between the number of file + to read and 10. + + ' + displayName: Shards Overrides Replication Initialization Backup Restore + Performance Download Concurrency + path: shards.overrides.replication.initialization.backupRestorePerformance.downloadConcurrency + - displayName: Shards Overrides Metadata Annotations All Resources + path: shards.overrides.metadata.annotations.allResources + - displayName: Shards Overrides Metadata Annotations Cluster Pods + path: shards.overrides.metadata.annotations.clusterPods + - displayName: Shards Overrides Metadata Annotations Services + path: shards.overrides.metadata.annotations.services + - displayName: Shards Overrides Metadata Annotations Primary Service + path: shards.overrides.metadata.annotations.primaryService + - displayName: Shards Overrides Metadata Annotations Replicas Service + path: shards.overrides.metadata.annotations.replicasService + - displayName: Shards Overrides Metadata Labels Cluster Pods + path: shards.overrides.metadata.labels.clusterPods + - displayName: Shards Overrides Metadata Labels Services + path: shards.overrides.metadata.labels.services + - description: '**Deprecated** use instead .spec.configurations.observability.prometheusAutobind. + + ' + displayName: Prometheus Autobind + path: prometheusAutobind + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Name of the [SGDistributedLogs](https://stackgres.io/doc/latest/reference/crd/sgdistributedlogs/) + to use for this cluster. It must exist. + + ' + displayName: SGDistributedLogs Reference + path: distributedLogs.sgDistributedLogs + - description: "Define a retention window with the syntax ` (minutes|hours|days|months)`\ + \ in which log entries are kept.\n Log entries will be removed when\ + \ they get older more than the double of the specified retention window.\n\ + \nWhen this field is changed the retention will be applied only to log\ + \ entries that are newer than the end of\n the retention window previously\ + \ specified. If no retention window was previously specified it is considered\n\ + \ to be of 7 days. 
This means that if previous retention window is\
+ \ of `7 days` new retention configuration will\n apply after UTC timestamp\
+ \ calculated with: `SELECT date_trunc('days', now() at time zone 'UTC')\
+ \ - INTERVAL '7 days'`.\n"
+ displayName: Distributed Logs Retention
+ path: distributedLogs.retention
+ - description: 'It is a best practice, on non-containerized environments,
+ when running production workloads, to run each database server on a
+ different server (virtual or physical), i.e., not to co-locate more
+ than one database server per host.
+
+
+ The same best practice applies to databases on containers. By default,
+ StackGres will not allow running more than one StackGres pod on a given
+ Kubernetes node. Set this property to true to allow more than one StackGres
+ pod per node.
+
+
+ This property default value may be changed depending on the value of
+ field `.spec.profile`.
+
+ '
+ displayName: Non Production Options Disable Cluster Pod Anti Affinity
+ path: nonProductionOptions.disableClusterPodAntiAffinity
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'It is a best practice, on containerized environments, when
+ running production workloads, to enforce container''s resources requirements.
+
+
+ The same best practice applies to databases on containers. By default,
+ StackGres will configure resource requirements for patroni container.
+ Set this property to true to prevent StackGres from setting patroni
+ container''s resources requirement.
+
+
+ This property default value may be changed depending on the value of
+ field `.spec.profile`.
+
+ '
+ displayName: Non Production Options Disable Patroni Resource Requirements
+ path: nonProductionOptions.disablePatroniResourceRequirements
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'It is a best practice, on containerized environments, when
+ running production workloads, to enforce container''s resources requirements.
+
+
+ By default, StackGres will configure resource requirements for all the
+ containers. Set this property to true to prevent StackGres from setting
+ container''s resources requirements (except for patroni container, see
+ `disablePatroniResourceRequirements`).
+
+
+ This property default value may be changed depending on the value of
+ field `.spec.profile`.
+
+ '
+ displayName: Non Production Options Disable Cluster Resource Requirements
+ path: nonProductionOptions.disableClusterResourceRequirements
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: "**Deprecated** this value is ignored and you can consider\
+ \ it as always `true`.\n\nOn containerized environments, when running\
+ \ production workloads, enforcing container's cpu requirements request\
+ \ to be equal to the limit allows achieving the highest level of performance.\
+ \ Doing so reduces the chances of leaving\n the workload with less\
+ \ cpu than it requires. It also allows setting a [static CPU management\
+ \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\
+ \ that allows guaranteeing a pod the exclusive usage of CPUs on the node.\n\
+ \nBy default, StackGres will configure cpu requirements to have the\
+ \ same limit and request for the patroni container. 
Set this property\
+ \ to true to prevent StackGres from setting patroni container's cpu\
+ \ requirements request equal to the limit\n when `.spec.requests.cpu`\
+ \ is configured in the referenced `SGInstanceProfile`.\n"
+ displayName: Non Production Options Enable Set Patroni Cpu Requests
+ path: nonProductionOptions.enableSetPatroniCpuRequests
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: "**Deprecated** this value is ignored and you can consider\
+ \ it as always `true`.\n\nOn containerized environments, when running\
+ \ production workloads, enforcing container's cpu requirements request\
+ \ to be equal to the limit allows achieving the highest level of performance.\
+ \ Doing so reduces the chances of leaving\n the workload with less\
+ \ cpu than it requires. It also allows setting a [static CPU management\
+ \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\
+ \ that allows guaranteeing a pod the exclusive usage of CPUs on the node.\n\
+ \nBy default, StackGres will configure cpu requirements to have the\
+ \ same limit and request for all the containers. Set this property to\
+ \ true to prevent StackGres from setting container's cpu requirements\
+ \ request equal to the limit (except for patroni container, see `enablePatroniCpuRequests`)\n\
+ \ when `.spec.requests.containers..cpu` `.spec.requests.initContainers..cpu` is configured in the referenced `SGInstanceProfile`.\n"
+ displayName: Non Production Options Enable Set Cluster Cpu Requests
+ path: nonProductionOptions.enableSetClusterCpuRequests
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: "**Deprecated** this value is ignored and you can consider\
+ \ it as always `true`.\n\nOn containerized environments, when running\
+ \ production workloads, enforcing container's memory requirements request\
+ \ to be equal to the limit allows achieving the highest level of performance.\
+ \ Doing so reduces the chances of leaving\n the workload with less\
+ \ memory than it requires.\n\nBy default, StackGres will configure memory\
+ \ requirements to have the same limit and request for the patroni container.\
+ \ Set this property to true to prevent StackGres from setting patroni\
+ \ container's memory requirements request equal to the limit\n when\
+ \ `.spec.requests.memory` is configured in the referenced `SGInstanceProfile`.\n"
+ displayName: Non Production Options Enable Set Patroni Memory Requests
+ path: nonProductionOptions.enableSetPatroniMemoryRequests
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: "**Deprecated** this value is ignored and you can consider\
+ \ it as always `true`.\n\nOn containerized environments, when running\
+ \ production workloads, enforcing container's memory requirements request\
+ \ to be equal to the limit allows achieving the highest level of performance.\
+ \ Doing so reduces the chances of leaving\n the workload with less\
+ \ memory than it requires.\n\nBy default, StackGres will configure memory\
+ \ requirements to have the same limit and request for all the containers.\
+ \ Set this property to true to prevent StackGres from setting container's\
+ \ memory requirements request equal to the limit (except for patroni\
+ \ container, see `enablePatroniCpuRequests`)\n when `.spec.requests.containers..memory` `.spec.requests.initContainers..memory`\
+ \ is configured in the referenced `SGInstanceProfile`.\n"
+ 
displayName: Non Production Options Enable Set Cluster Memory Requests
+ path: nonProductionOptions.enableSetClusterMemoryRequests
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: The name of the feature gate to enable.
+ displayName: Non Production Options Enabled Feature Gates
+ path: nonProductionOptions.enabledFeatureGates
+ - description: "When set to the name of an existing [SGShardedBackup](https://stackgres.io/doc/latest/reference/crd/sgshardedbackup),\
+ \ the sharded cluster is initialized by restoring the\n backup data\
+ \ to it. If not set, the sharded cluster is initialized empty. The selected\
+ \ sharded backup must be in the same namespace.\n"
+ displayName: Initial Data Restore From Backup Name
+ path: initialData.restore.fromBackup.name
+ - description: "Specify the [recovery_target_inclusive](https://postgresqlco.nf/doc/en/param/recovery_target_timeline/)\
+ \ to stop recovery just after the specified\n recovery target (true),\
+ \ or just before the recovery target (false). Applies when targetLsn,\
+ \ pointInTimeRecovery, or targetXid is specified. This\n setting controls\
+ \ whether transactions having exactly the target WAL location (LSN),\
+ \ commit time, or transaction ID, respectively, will be included\n \
+ \ in the recovery. Default is true.\n"
+ displayName: Initial Data Restore From Backup Target Inclusive
+ path: initialData.restore.fromBackup.targetInclusive
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: 'An ISO 8601 date, that holds UTC date indicating at which
+ point-in-time the database has to be restored.
+
+ '
+ displayName: Initial Data Restore From Backup Point In Time Recovery Restore
+ To Timestamp
+ path: initialData.restore.fromBackup.pointInTimeRecovery.restoreToTimestamp
+ - description: 'The backup fetch process may fetch several streams in parallel.
+ Parallel fetching is enabled when set to a value larger than one.
+
+
+ If not specified it will be interpreted as latest. 
+ + ' + displayName: Initial Data Restore Download Disk Concurrency + path: initialData.restore.downloadDiskConcurrency + - path: '' + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - path: '' + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podCount + - displayName: Replicate From Instance SGCluster Reference + path: '' + - path: '' + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - path: '' + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - displayName: Configurations Backups SGObjectStorage + path: '' + - displayName: Configurations Backups SGObjectStorage + path: '' + statusDescriptors: + - displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + - description: Last time the condition transitioned from one status to another. + displayName: Conditions Last Transition Time + path: conditions.lastTransitionTime + - description: A human readable message indicating details about the transition. + displayName: Conditions Message + path: conditions.message + - description: The reason for the condition's last transition. + displayName: Conditions Reason + path: conditions.reason + - description: Status of the condition, one of True, False, Unknown. + displayName: Conditions Status + path: conditions.status + - description: Type of deployment condition. + displayName: Conditions Type + path: conditions.type + - description: The name of the cluster. + displayName: Cluster Statuses Name + path: clusterStatuses.name + - description: Indicates if the cluster requires restart + displayName: Cluster Statuses Pending Restart + path: clusterStatuses.pendingRestart + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: The name of the extension to install. + displayName: To Install Postgres Extensions Name + path: toInstallPostgresExtensions.name + - description: The id of the publisher of the extension to install. + displayName: To Install Postgres Extensions Publisher + path: toInstallPostgresExtensions.publisher + - description: The version of the extension to install. + displayName: To Install Postgres Extensions Version + path: toInstallPostgresExtensions.version + - description: The repository base URL from where the extension will be + installed from. + displayName: To Install Postgres Extensions Repository + path: toInstallPostgresExtensions.repository + - description: The postgres major version of the extension to install. + displayName: To Install Postgres Extensions Postgres Version + path: toInstallPostgresExtensions.postgresVersion + - description: The build version of the extension to install. 
+ displayName: To Install Postgres Extensions Build + path: toInstallPostgresExtensions.build + - description: The extra mount of the installed extension. + displayName: To Install Postgres Extensions Extra Mounts + path: toInstallPostgresExtensions.extraMounts + - description: The name of the Secret as specified in [Service Binding spec + for provisioned service](https://servicebinding.io/spec/core/1.0.0/#provisioned-service). + displayName: Binding Name + path: binding.name + - description: 'One of the SGBackups that compose the SGShardedBackup used + to restore the sharded cluster. + + ' + displayName: SGBackups + path: sgBackups + version: v1alpha1 + - description: Day 2 Operations for sharded clusters, including restarts, resharding, + etc + displayName: StackGres Sharded DbOps + kind: SGShardedDbOps + name: sgshardeddbops.stackgres.io + specDescriptors: + - description: 'The name of SGShardedCluster on which the operation will + be performed. + + ' + displayName: Target SGShardedCluster + path: sgShardedCluster + - displayName: Scheduling Node Selector + path: scheduling.nodeSelector + - description: Effect indicates the taint effect to match. Empty means match + all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + displayName: Scheduling Tolerations Effect + path: scheduling.tolerations.effect + - description: Key is the taint key that the toleration applies to. Empty + means match all taint keys. If the key is empty, operator must be Exists; + this combination means to match all values and all keys. + displayName: Scheduling Tolerations Key + path: scheduling.tolerations.key + - description: Operator represents a key's relationship to the value. Valid + operators are Exists and Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate all taints of a particular + category. + displayName: Scheduling Tolerations Operator + path: scheduling.tolerations.operator + - description: TolerationSeconds represents the period of time the toleration + (which must be of effect NoExecute, otherwise this field is ignored) + tolerates the taint. By default, it is not set, which means tolerate + the taint forever (do not evict). Zero and negative values will be treated + as 0 (evict immediately) by the system. + displayName: Scheduling Tolerations Toleration Seconds + path: scheduling.tolerations.tolerationSeconds + - description: Value is the taint value the toleration matches to. If the + operator is Exists, the value should be empty, otherwise just a regular + string. + displayName: Scheduling Tolerations Value + path: scheduling.tolerations.value + - description: The label key that the selector applies to. + displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Expressions Key + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Expressions Operator + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.operator + - displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Expressions Values + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Fields Key + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Fields Operator + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.operator + - displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Preference Match Fields Values + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.values + - description: Weight associated with matching the corresponding nodeSelectorTerm, + in the range 1-100. + displayName: Scheduling Node Affinity Preferred During Scheduling Ignored + During Execution Weight + path: scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: The label key that the selector applies to. + displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Expressions Key + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Expressions Operator + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.operator + - displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Expressions Values + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Fields Key + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Fields Operator + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.operator + - displayName: Scheduling Node Affinity Required During Scheduling Ignored + During Execution Node Selector Terms Match Fields Values + path: scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.values + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Scheduling Priority Class Name + path: scheduling.priorityClassName + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Label Selector Match Expressions + Key + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Label Selector Match Expressions + Operator + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Label Selector Match Expressions + Values + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Label Selector Match Labels + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Match Label Keys + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Mismatch Label Keys + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespace Selector Match Expressions + Key + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespace Selector Match Expressions + Operator + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespace Selector Match Expressions + Values + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespace Selector Match Labels + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Namespaces + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Pod Affinity Term Topology Key + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Scheduling Pod Affinity Preferred During Scheduling Ignored + During Execution Weight + path: scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Key + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Operator + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Values + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Labels + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Match Label Keys + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Mismatch Label Keys + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Key + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Operator + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Values + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Labels + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespaces + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Topology Key + path: scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Key + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Operator + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Values + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Labels + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Match Label Keys + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Mismatch Label Keys + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Key + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Operator + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Values + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Labels + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespaces + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. 
+ displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Topology Key + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Weight + path: scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Key + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Operator + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Values + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Label Selector Match Labels + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Match Label Keys + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Mismatch Label Keys + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Key + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Operator + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Values + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Labels + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Namespaces + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Scheduling Pod Anti Affinity Required During Scheduling Ignored + During Execution Topology Key + path: scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: 'The kind of operation that will be performed on the SGCluster. + Available operations are: + + + * `resharding`: perform a resharding of the cluster. + + * `restart`: perform a restart of the cluster. + + * `securityUpgrade`: perform a security upgrade of the cluster. + + ' + displayName: Op + path: op + - description: 'An ISO 8601 date, that holds UTC scheduled date of the operation + execution. + + + If not specified or if the date it''s in the past, it will be interpreted + ASAP. + + ' + displayName: Run At + path: runAt + - description: 'An ISO 8601 duration in the format `PnDTnHnMn.nS`, that + specifies a timeout after which the operation execution will be canceled. + + + If the operation can not be performed due to timeout expiration, the + condition `Failed` will have a status of `True` and the reason will + be `OperationTimedOut`. + + + If not specified the operation will never fail for timeout expiration. + + ' + displayName: Timeout + path: timeout + - description: 'The maximum number of retries the operation is allowed to + do after a failure. + + + A value of `0` (zero) means no retries are made. Defaults to: `0`. + + ' + displayName: Max Retries + path: maxRetries + - description: 'A float number between 0.0 and 1.0 which indicates the maximum + difference ratio of node utilization from average utilization. + + See also https://docs.citusdata.com/en/stable/develop/api_udf.html#citus-rebalance-start + + ' + displayName: Resharding Citus Drain Only + path: resharding.citus.drainOnly + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The name of a strategy in Rebalancer strategy table. Will + pick a default one if not specified + + See also https://docs.citusdata.com/en/stable/develop/api_udf.html#citus-rebalance-start + + ' + displayName: Resharding Citus Rebalance Strategy + path: resharding.citus.rebalanceStrategy + - description: "The method used to perform the restart operation. 
Available\
+ \ methods are:\n\n* `InPlace`: the in-place method does not require\
+ \ more resources than those that are available.\n In case only an instance\
+ \ of the StackGres cluster for the coordinator or any shard is present\n\
+ \ this means the service disruption will last longer, so we encourage\
+ \ using the reduced impact restart,\n especially for a production\
+ \ environment.\n* `ReducedImpact`: this procedure is the same as the\
+ \ in-place method but requires additional\n resources in order to spawn\
+ \ a new updated replica that will be removed when the procedure completes.\n"
+ displayName: Restart Method
+ path: restart.method
+ - description: "By default all Pods are restarted. Setting this option to\
+ \ `true` allows restarting only those Pods which\n are in pending restart\
+ \ state as detected by the operation. Defaults to: `false`.\n"
+ displayName: Restart Only Pending Restart
+ path: restart.onlyPendingRestart
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:booleanSwitch
+ - description: "The method used to perform the security upgrade operation.\
+ \ Available methods are:\n\n* `InPlace`: the in-place method does not\
+ \ require more resources than those that are available.\n In case only\
+ \ an instance of the StackGres cluster is present this means the service\
+ \ disruption will\n last longer, so we encourage using the reduced impact\
+ \ restart, especially for a production environment.\n* `ReducedImpact`:\
+ \ this procedure is the same as the in-place method but requires additional\n\
+ \ resources in order to spawn a new updated replica that will be removed\
+ \ when the procedure completes.\n"
+ displayName: Security Upgrade Method
+ path: securityUpgrade.method
+ statusDescriptors:
+ - displayName: Conditions
+ path: conditions
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes.conditions
+ - description: Last time the condition transitioned from one status to another.
+ displayName: Conditions Last Transition Time
+ path: conditions.lastTransitionTime
+ - description: A human-readable message indicating details about the transition.
+ displayName: Conditions Message
+ path: conditions.message
+ - description: The reason for the condition's last transition.
+ displayName: Conditions Reason
+ path: conditions.reason
+ - description: Status of the condition, one of `True`, `False` or `Unknown`.
+ displayName: Conditions Status
+ path: conditions.status
+ - description: Type of deployment condition. 
+ displayName: Conditions Type + path: conditions.type + - description: 'The number of retries performed by the operation + + ' + displayName: Op Retries + path: opRetries + - description: 'The ISO 8601 timestamp of when the operation started running + + ' + displayName: Op Started + path: opStarted + - displayName: Restart Pending To Restart SGClusters + path: restart.pendingToRestartSgClusters + - displayName: Restart Restarted SGClusters + path: restart.restartedSgClusters + - description: 'A failure message (when available) + + ' + displayName: Restart Failure + path: restart.failure + - displayName: Security Upgrade Pending To Restart SGClusters + path: securityUpgrade.pendingToRestartSgClusters + - displayName: Security Upgrade Restarted SGClusters + path: securityUpgrade.restartedSgClusters + - description: 'A failure message (when available) + + ' + displayName: Security Upgrade Failure + path: securityUpgrade.failure + version: v1 + - description: Stream Change Data Captured (CDC) events + displayName: StackGres Stream + kind: SGStream + name: sgstreams.stackgres.io + specDescriptors: + - description: 'The type of data source. Available data source types are: + + + * `SGCluster`: an SGCluster in the same namespace + + * `Postgres`: any Postgres instance + + ' + displayName: Source Type + path: source.type + - description: 'The target SGCluster name. + + ' + displayName: Source SGCluster Name + path: source.sgCluster.name + - description: 'The target database name to which the CDC process will connect + to. + + + If not specified the default postgres database will be targeted. + + ' + displayName: Source SGCluster Database + path: source.sgCluster.database + - description: 'The username used by the CDC process to connect to the database. + + + If not specified the default superuser username (by default postgres) + will be used. + + ' + displayName: Source SGCluster Username + path: source.sgCluster.username + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The Secret name where the username is stored. + + ' + displayName: Source SGCluster Username Name + path: source.sgCluster.username.name + - description: 'The Secret key where the username is stored. + + ' + displayName: Source SGCluster Username Key + path: source.sgCluster.username.key + - description: 'The password used by the CDC process to connect to the database. + + + If not specified the default superuser password will be used. + + ' + displayName: Source SGCluster Password + path: source.sgCluster.password + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Secret + - description: 'The Secret name where the password is stored. + + ' + displayName: Source SGCluster Password Name + path: source.sgCluster.password.name + - description: 'The Secret key where the password is stored. + + ' + displayName: Source SGCluster Password Key + path: source.sgCluster.password.key + - description: 'A regular expressions that allow to match one or more `..` + entries to be filtered before sending to the target. + + ' + displayName: Source SGCluster Includes + path: source.sgCluster.includes + - description: 'A regular expressions that allow to match one or more `.
.` + entries to be filtered out before sending to the target. + + ' + displayName: Source SGCluster Excludes + path: source.sgCluster.excludes + - description: 'Default `pgoutput`. The name of the [PostgreSQL logical + decoding plug-in](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-output-plugin) + installed on the PostgreSQL server. Supported values are decoderbufs, + and pgoutput. + + ' + displayName: Source SGCluster Debezium Properties Plugin Name + path: source.sgCluster.debeziumProperties.pluginName + - description: 'Default . (with all characters + that are not `[a-zA-Z0-9]` changed to `_` character). The name of the + PostgreSQL logical decoding slot that was created for streaming changes + from a particular plug-in for a particular database/schema. The server + uses this slot to stream events to the Debezium connector that you are + configuring. + + + Slot names must conform to [PostgreSQL replication slot naming rules](https://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION-SLOTS-MANIPULATION), + which state: "Each replication slot has a name, which can contain lower-case + letters, numbers, and the underscore character." + + ' + displayName: Source SGCluster Debezium Properties Slot Name + path: source.sgCluster.debeziumProperties.slotName + - description: 'Default `true`. Whether or not to delete the logical replication + slot when the connector stops in a graceful, expected way. The default + behavior is that the replication slot remains configured for the connector + when the connector stops. When the connector restarts, having the same + replication slot enables the connector to start processing where it + left off. Set to true in only testing or development environments. Dropping + the slot allows the database to discard WAL segments. When the connector + restarts it performs a new snapshot or it can continue from a persistent + offset in the Kafka Connect offsets topic. + + ' + displayName: Source SGCluster Debezium Properties Slot Drop On Stop + path: source.sgCluster.debeziumProperties.slotDropOnStop + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default . (with all characters + that are not `[a-zA-Z0-9]` changed to `_` character). The name of the + PostgreSQL publication created for streaming changes when using pgoutput. + This publication is created at start-up if it does not already exist + and it includes all tables. Debezium then applies its own include/exclude + list filtering, if configured, to limit the publication to change events + for the specific tables of interest. The connector user must have superuser + permissions to create this publication, so it is usually preferable + to create the publication before starting the connector for the first + time. If the publication already exists, either for all tables or configured + with a subset of tables, Debezium uses the publication as it is defined. + + ' + displayName: Source SGCluster Debezium Properties Publication Name + path: source.sgCluster.debeziumProperties.publicationName + - description: 'Default `false`. Specifies whether to skip publishing messages + when there is no change in included columns. This would essentially + filter messages if there is no change in columns included as per includes + or excludes fields. 
Note: Only works when REPLICA IDENTITY of the table + is set to FULL + + ' + displayName: Source SGCluster Debezium Properties Skip Messages Without + Change + path: source.sgCluster.debeziumProperties.skipMessagesWithoutChange + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `adaptive`. Time, date, and timestamps can be represented + with different kinds of precision: + + + * `adaptive`: captures the time and timestamp values exactly as in the + database using either millisecond, microsecond, or nanosecond precision + values based on the database column’s type. + + * `adaptive_time_microseconds`: captures the date, datetime and timestamp + values exactly as in the database using either millisecond, microsecond, + or nanosecond precision values based on the database column’s type. + An exception is TIME type fields, which are always captured as microseconds. + + * `connect`: always represents time and timestamp values by using Kafka + Connect’s built-in representations for Time, Date, and Timestamp, which + use millisecond precision regardless of the database columns'' precision. + For more information, see [temporal values](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-temporal-types). + + ' + displayName: Source SGCluster Debezium Properties Time Precision Mode + path: source.sgCluster.debeziumProperties.timePrecisionMode + - description: 'Default `precise`. Specifies how the connector should handle + values for DECIMAL and NUMERIC columns: + + + * `precise`: represents values by using java.math.BigDecimal to represent + values in binary form in change events. + + * `double`: represents values by using double values, which might result + in a loss of precision but which is easier to use. + + * `string`: encodes values as formatted strings, which are easy to consume + but semantic information about the real type is lost. For more information, + see [Decimal types](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-decimal-types). + + ' + displayName: Source SGCluster Debezium Properties Decimal Handling Mode + path: source.sgCluster.debeziumProperties.decimalHandlingMode + - description: 'Default `json`. Specifies how the connector should handle + values for hstore columns: + + + * `map`: represents values by using MAP. + + * `json`: represents values by using json string. This setting encodes + values as formatted strings such as {"key" : "val"}. For more information, + see [PostgreSQL HSTORE type](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-hstore-type). + + ' + displayName: Source SGCluster Debezium Properties Hstore Handling Mode + path: source.sgCluster.debeziumProperties.hstoreHandlingMode + - description: "Default `numeric`. Specifies how the connector should handle\ + \ values for interval columns:\n\n * `numeric`: represents intervals\ + \ using approximate number of microseconds.\n * `string`: represents\ + \ intervals exactly by using the string pattern representation PYMDTHMS.\ + \ For example: P1Y2M3DT4H5M6.78S. For more information, see [PostgreSQL\ + \ basic types](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-basic-types).\n" + displayName: Source SGCluster Debezium Properties Interval Handling Mode + path: source.sgCluster.debeziumProperties.intervalHandlingMode + - description: 'Default `true`. 
Controls whether a delete event is followed + by a tombstone event. + + + * `true` - a delete operation is represented by a delete event and a + subsequent tombstone event. + + * `false` - only a delete event is emitted. + + + After a source record is deleted, emitting a tombstone event (the default + behavior) allows Kafka to completely delete all events that pertain + to the key of the deleted row in case [log compaction](https://kafka.apache.org/documentation/#compaction) + is enabled for the topic. + + ' + displayName: Source SGCluster Debezium Properties Tombstones On Delete + path: source.sgCluster.debeziumProperties.tombstonesOnDelete + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Source SGCluster Debezium Properties Column Truncate To Length + Chars + path: source.sgCluster.debeziumProperties.columnTruncateToLengthChars + - displayName: Source SGCluster Debezium Properties Column Mask With Length + Chars + path: source.sgCluster.debeziumProperties.columnMaskWithLengthChars + - displayName: Source SGCluster Debezium Properties Column Mask Hash + path: source.sgCluster.debeziumProperties.columnMaskHash + - displayName: Source SGCluster Debezium Properties Column Mask Hash V2 + path: source.sgCluster.debeziumProperties.columnMaskHashV2 + - displayName: Source SGCluster Debezium Properties Column Propagate Source + Type + path: source.sgCluster.debeziumProperties.columnPropagateSourceType + - displayName: Source SGCluster Debezium Properties Datatype Propagate Source + Type + path: source.sgCluster.debeziumProperties.datatypePropagateSourceType + - displayName: Source SGCluster Debezium Properties Message Key Columns + path: source.sgCluster.debeziumProperties.messageKeyColumns + - description: 'Default `all_tables`. Applies only when streaming changes + by using [the pgoutput plug-in](https://www.postgresql.org/docs/current/sql-createpublication.html). + The setting determines how creation of a [publication](https://www.postgresql.org/docs/current/logical-replication-publication.html) + should work. Specify one of the following values: + + + * `all_tables` - If a publication exists, the connector uses it. If + a publication does not exist, the connector creates a publication for + all tables in the database for which the connector is capturing changes. + For the connector to create a publication it must access the database + through a database user account that has permission to create publications + and perform replications. You grant the required permission by using + the following SQL command CREATE PUBLICATION FOR + ALL TABLES;. + + * `disabled` - The connector does not attempt to create a publication. + A database administrator or the user configured to perform replications + must have created the publication before running the connector. If the + connector cannot find the publication, the connector throws an exception + and stops. + + * `filtered` - If a publication exists, the connector uses it. If no + publication exists, the connector creates a new publication for tables + that match the current filter configuration as specified by the schema.include.list, + schema.exclude.list, and table.include.list, and table.exclude.list + connector configuration properties. For example: CREATE PUBLICATION + FOR TABLE . If the publication + exists, the connector updates the publication for tables that match + the current filter configuration. For example: ALTER PUBLICATION + SET TABLE . 
+ + ' + displayName: Source SGCluster Debezium Properties Publication Autocreate + Mode + path: source.sgCluster.debeziumProperties.publicationAutocreateMode + - displayName: Source SGCluster Debezium Properties Replica Identity Autoset + Values + path: source.sgCluster.debeziumProperties.replicaIdentityAutosetValues + - description: 'Default `bytes`. Specifies how binary (bytea) columns should + be represented in change events: + + + * `bytes` represents binary data as byte array. + + * `base64` represents binary data as base64-encoded strings. + + * `base64-url-safe` represents binary data as base64-url-safe-encoded + strings. + + * `hex` represents binary data as hex-encoded (base16) strings. + + ' + displayName: Source SGCluster Debezium Properties Binary Handling Mode + path: source.sgCluster.debeziumProperties.binaryHandlingMode + - description: 'Default `none`. Specifies how schema names should be adjusted + for compatibility with the message converter used by the connector. + Possible settings: + + + * `none` does not apply any adjustment. + + * `avro` replaces the characters that cannot be used in the Avro type + name with underscore. + + * `avro_unicode` replaces the underscore or characters that cannot be + used in the Avro type name with corresponding unicode like _uxxxx. Note: + _ is an escape sequence like backslash in Java + + ' + displayName: Source SGCluster Debezium Properties Schema Name Adjustment + Mode + path: source.sgCluster.debeziumProperties.schemaNameAdjustmentMode + - description: 'Default `none`. Specifies how field names should be adjusted + for compatibility with the message converter used by the connector. + Possible settings: + + + * `none` does not apply any adjustment. + + * `avro` replaces the characters that cannot be used in the Avro type + name with underscore. + + * `avro_unicode` replaces the underscore or characters that cannot be + used in the Avro type name with corresponding unicode like _uxxxx. Note: + _ is an escape sequence like backslash in Java + + + For more information, see [Avro naming](https://debezium.io/documentation/reference/stable/configuration/avro.html#avro-naming). + + ' + displayName: Source SGCluster Debezium Properties Field Name Adjustment + Mode + path: source.sgCluster.debeziumProperties.fieldNameAdjustmentMode + - description: 'Default `2`. Specifies how many decimal digits should be + used when converting Postgres money type to java.math.BigDecimal, which + represents the values in change events. Applicable only when decimalHandlingMode + is set to precise. + + ' + displayName: Source SGCluster Debezium Properties Money Fraction Digits + path: source.sgCluster.debeziumProperties.moneyFractionDigits + - displayName: Source SGCluster Debezium Properties Converters + path: source.sgCluster.debeziumProperties.converters + - description: "Default `initial`. Specifies the criteria for performing\ + \ a snapshot when the connector starts:\n\n* `always` - The connector\ + \ performs a snapshot every time that it starts. The snapshot includes\ + \ the structure and data of the captured tables. Specify this value\ + \ to populate topics with a complete representation of the data from\ + \ the captured tables every time that the connector starts. 
After the\ + \ snapshot completes, the connector begins to stream event records for\ + \ subsequent database changes.\n* `initial` - The connector performs\ + \ a snapshot only when no offsets have been recorded for the logical\ + \ server name.\n* `initial_only` - The connector performs an initial\ + \ snapshot and then stops, without processing any subsequent changes.\n\ + * `no_data` - The connector never performs snapshots. When a connector\ + \ is configured this way, after it starts, it behaves as follows: If\ + \ there is a previously stored LSN in the Kafka offsets topic, the connector\ + \ continues streaming changes from that position. If no LSN is stored,\ + \ the connector starts streaming changes from the point in time when\ + \ the PostgreSQL logical replication slot was created on the server.\ + \ Use this snapshot mode only when you know all data of interest is\ + \ still reflected in the WAL.\n* `never` - Deprecated see no_data.\n\ + * `when_needed` - After the connector starts, it performs a snapshot\ + \ only if it detects one of the following circumstances: \n It cannot\ + \ detect any topic offsets.\n A previously recorded offset specifies\ + \ a log position that is not available on the server.\n* `configuration_based`\ + \ - With this option, you control snapshot behavior through a set of\ + \ connector properties that have the prefix 'snapshotModeConfigurationBased'.\n\ + * `custom` - The connector performs a snapshot according to the implementation\ + \ specified by the snapshotModeCustomName property, which defines a\ + \ custom implementation of the io.debezium.spi.snapshot.Snapshotter\ + \ interface.\n\nFor more information, see the [table of snapshot.mode\ + \ options](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-connector-snapshot-mode-options).\n" + displayName: Source SGCluster Debezium Properties Snapshot Mode + path: source.sgCluster.debeziumProperties.snapshotMode + - description: 'Default `false`. If the snapshotMode is set to configuration_based, + set this property to specify whether the connector includes table data + when it performs a snapshot. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Mode Configuration + Based Snapshot Data + path: source.sgCluster.debeziumProperties.snapshotModeConfigurationBasedSnapshotData + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `false`. If the snapshotMode is set to configuration_based, + set this property to specify whether the connector includes the table + schema when it performs a snapshot. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Mode Configuration + Based Snapshot Schema + path: source.sgCluster.debeziumProperties.snapshotModeConfigurationBasedSnapshotSchema + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `false`. If the snapshotMode is set to configuration_based, + set this property to specify whether the connector begins to stream + change events after a snapshot completes. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Mode Configuration + Based Start Stream + path: source.sgCluster.debeziumProperties.snapshotModeConfigurationBasedStartStream + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `false`. 
If the snapshotMode is set to configuration_based, + set this property to specify whether the connector includes table schema + in a snapshot if the schema history topic is not available. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Mode Configuration + Based Snapshot On Schema Error + path: source.sgCluster.debeziumProperties.snapshotModeConfigurationBasedSnapshotOnSchemaError + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `false`. If the snapshotMode is set to configuration_based, + this property specifies whether the connector attempts to snapshot table + data if it does not find the last committed offset in the transaction + log. Set the value to true to instruct the connector to perform a new + snapshot. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Mode Configuration + Based Snapshot On Data Error + path: source.sgCluster.debeziumProperties.snapshotModeConfigurationBasedSnapshotOnDataError + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When snapshotMode is set as custom, use this setting to + specify the name of the custom implementation provided in the name() + method that is defined by the ''io.debezium.spi.snapshot.Snapshotter'' + interface. The provided implementation is called after a connector restart + to determine whether to perform a snapshot. For more information, see + [custom snapshotter SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + displayName: Source SGCluster Debezium Properties Snapshot Mode Custom + Name + path: source.sgCluster.debeziumProperties.snapshotModeCustomName + - description: 'Default `none`. Specifies how the connector holds locks + on tables while performing a schema snapshot. Set one of the following + options: + + + * `shared`: The connector holds a table lock that prevents exclusive + table access during the initial portion phase of the snapshot in which + database schemas and other metadata are read. After the initial phase, + the snapshot no longer requires table locks. + + * `none`: The connector avoids locks entirely. Do not use this mode + if schema changes might occur during the snapshot. + + + > *WARNING*: Do not use this mode if schema changes might occur during + the snapshot. + + + * `custom`: The connector performs a snapshot according to the implementation + specified by the snapshotLockingModeCustomName property, which is a + custom implementation of the io.debezium.spi.snapshot.SnapshotLock interface. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Locking Mode + path: source.sgCluster.debeziumProperties.snapshotLockingMode + - description: 'When snapshotLockingMode is set to custom, use this setting + to specify the name of the custom implementation provided in the name() + method that is defined by the ''io.debezium.spi.snapshot.SnapshotLock'' + interface. For more information, see [custom snapshotter SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + displayName: Source SGCluster Debezium Properties Snapshot Locking Mode + Custom Name + path: source.sgCluster.debeziumProperties.snapshotLockingModeCustomName + - description: 'Default `select_all`. Specifies how the connector queries + data while performing a snapshot. 
Set one of the following options: + + + * `select_all`: The connector performs a select all query by default, + optionally adjusting the columns selected based on the column include + and exclude list configurations. + + * `custom`: The connector performs a snapshot query according to the + implementation specified by the snapshotQueryModeCustomName property, + which defines a custom implementation of the io.debezium.spi.snapshot.SnapshotQuery + interface. This setting enables you to manage snapshot content in a + more flexible manner compared to using the snapshotSelectStatementOverrides + property. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Query Mode + path: source.sgCluster.debeziumProperties.snapshotQueryMode + - description: 'When snapshotQueryMode is set as custom, use this setting + to specify the name of the custom implementation provided in the name() + method that is defined by the ''io.debezium.spi.snapshot.SnapshotQuery'' + interface. For more information, see [custom snapshotter SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + displayName: Source SGCluster Debezium Properties Snapshot Query Mode + Custom Name + path: source.sgCluster.debeziumProperties.snapshotQueryModeCustomName + - displayName: Source SGCluster Debezium Properties Snapshot Include Collection + List + path: source.sgCluster.debeziumProperties.snapshotIncludeCollectionList + - description: 'Default `10000`. Positive integer value that specifies the + maximum amount of time (in milliseconds) to wait to obtain table locks + when performing a snapshot. If the connector cannot acquire table locks + in this time interval, the snapshot fails. [How the connector performs + snapshots](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-snapshots) + provides details. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Lock Timeout + Ms + path: source.sgCluster.debeziumProperties.snapshotLockTimeoutMs + - displayName: Source SGCluster Debezium Properties Snapshot Select Statement + Overrides + path: source.sgCluster.debeziumProperties.snapshotSelectStatementOverrides + - description: 'Default `fail`. Specifies how the connector should react + to exceptions during processing of events: + + + * `fail`: propagates the exception, indicates the offset of the problematic + event, and causes the connector to stop. + + * `warn`: logs the offset of the problematic event, skips that event, + and continues processing. + + * `skip`: skips the problematic event and continues processing. + + ' + displayName: Source SGCluster Debezium Properties Event Processing Failure + Handling Mode + path: source.sgCluster.debeziumProperties.eventProcessingFailureHandlingMode + - description: 'Default `2048`. Positive integer value that specifies the + maximum size of each batch of events that the connector processes. + + ' + displayName: Source SGCluster Debezium Properties Max Batch Size + path: source.sgCluster.debeziumProperties.maxBatchSize + - description: 'Default `8192`. Positive integer value that specifies the + maximum number of records that the blocking queue can hold. When Debezium + reads events streamed from the database, it places the events in the + blocking queue before it writes them to Kafka. 
The blocking queue can + provide backpressure for reading change events from the database in + cases where the connector ingests messages faster than it can write + them to Kafka, or when Kafka becomes unavailable. Events that are held + in the queue are disregarded when the connector periodically records + offsets. Always set the value of maxQueueSize to be larger than the + value of maxBatchSize. + + ' + displayName: Source SGCluster Debezium Properties Max Queue Size + path: source.sgCluster.debeziumProperties.maxQueueSize + - description: "Default `0`. A long integer value that specifies the maximum\ + \ volume of the blocking queue in bytes. By default, volume limits are\ + \ not specified for the blocking queue. To specify the number of bytes\ + \ that the queue can consume, set this property to a positive long value.\n\ + \ If maxQueueSize is also set, writing to the queue is blocked when\ + \ the size of the queue reaches the limit specified by either property.\ + \ For example, if you set maxQueueSize=1000, and maxQueueSizeInBytes=5000,\ + \ writing to the queue is blocked after the queue contains 1000 records,\ + \ or after the volume of the records in the queue reaches 5000 bytes.\n" + displayName: Source SGCluster Debezium Properties Max Queue Size In Bytes + path: source.sgCluster.debeziumProperties.maxQueueSizeInBytes + - description: 'Default `500`. Positive integer value that specifies the + number of milliseconds the connector should wait for new change events + to appear before it starts processing a batch of events. Defaults to + 500 milliseconds. + + ' + displayName: Source SGCluster Debezium Properties Poll Interval Ms + path: source.sgCluster.debeziumProperties.pollIntervalMs + - description: "Default `true`. Specifies connector behavior when the connector\ + \ encounters a field whose data type is unknown. The default behavior\ + \ is that the connector omits the field from the change event and logs\ + \ a warning.\n Set this property to true if you want the change event\ + \ to contain an opaque binary representation of the field. This lets\ + \ consumers decode the field. You can control the exact representation\ + \ by setting the binaryHandlingMode property.\n> *NOTE*: Consumers risk\ + \ backward compatibility issues when `includeUnknownDatatypes` is set\ + \ to `true`. Not only may the database-specific binary representation\ + \ change between releases, but if the data type is eventually supported\ + \ by Debezium, the data type will be sent downstream in a logical type,\ + \ which would require adjustments by consumers. In general, when encountering\ + \ unsupported data types, create a feature request so that support can\ + \ be added.\n" + displayName: Source SGCluster Debezium Properties Include Unknown Datatypes + path: source.sgCluster.debeziumProperties.includeUnknownDatatypes + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Source SGCluster Debezium Properties Database Initial Statements + path: source.sgCluster.debeziumProperties.databaseInitialStatements + - description: 'Default `10000`. Frequency for sending replication connection + status updates to the server, given in milliseconds. The property also + controls how frequently the database status is checked to detect a dead + connection in case the database was shut down. + + ' + displayName: Source SGCluster Debezium Properties Status Update Interval + Ms + path: source.sgCluster.debeziumProperties.statusUpdateIntervalMs + - description: "Default `0`. 
Controls how frequently the connector sends\ + \ heartbeat messages to a Kafka topic. The default behavior is that\ + \ the connector does not send heartbeat messages.\n Heartbeat messages\ + \ are useful for monitoring whether the connector is receiving change\ + \ events from the database. Heartbeat messages might help decrease the\ + \ number of change events that need to be re-sent when a connector restarts.\ + \ To send heartbeat messages, set this property to a positive integer,\ + \ which indicates the number of milliseconds between heartbeat messages.\n\ + \ Heartbeat messages are needed when there are many updates in a database\ + \ that is being tracked but only a tiny number of updates are related\ + \ to the table(s) and schema(s) for which the connector is capturing\ + \ changes. In this situation, the connector reads from the database\ + \ transaction log as usual but rarely emits change records to Kafka.\ + \ This means that no offset updates are committed to Kafka and the connector\ + \ does not have an opportunity to send the latest retrieved LSN to the\ + \ database. The database retains WAL files that contain events that\ + \ have already been processed by the connector. Sending heartbeat messages\ + \ enables the connector to send the latest retrieved LSN to the database,\ + \ which allows the database to reclaim disk space being used by no longer\ + \ needed WAL files.\n" + displayName: Source SGCluster Debezium Properties Heartbeat Interval Ms + path: source.sgCluster.debeziumProperties.heartbeatIntervalMs + - description: "Specifies a query that the connector executes on the source\ + \ database when the connector sends a heartbeat message.\n This is useful\ + \ for resolving the situation described in [WAL disk space consumption](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-wal-disk-space),\ + \ where capturing changes from a low-traffic database on the same host\ + \ as a high-traffic database prevents Debezium from processing WAL records\ + \ and thus acknowledging WAL positions with the database. To address\ + \ this situation, create a heartbeat table in the low-traffic database,\ + \ and set this property to a statement that inserts records into that\ + \ table, for example:\n\n ```\n INSERT INTO test_heartbeat_table (text)\ + \ VALUES ('test_heartbeat')\n ```\n \n This allows the connector to\ + \ receive changes from the low-traffic database and acknowledge their\ + \ LSNs, which prevents unbounded WAL growth on the database host.\n" + displayName: Source SGCluster Debezium Properties Heartbeat Action Query + path: source.sgCluster.debeziumProperties.heartbeatActionQuery + - description: 'Default `columns_diff`. Specify the conditions that trigger + a refresh of the in-memory schema for a table. + + + * `columns_diff`: is the safest mode. It ensures that the in-memory + schema stays in sync with the database table’s schema at all times. + + * `columns_diff_exclude_unchanged_toast`: instructs the connector to + refresh the in-memory schema cache if there is a discrepancy with the + schema derived from the incoming message, unless unchanged TOASTable + data fully accounts for the discrepancy. + + + This setting can significantly improve connector performance if there + are frequently-updated tables that have TOASTed data that are rarely + part of updates. However, it is possible for the in-memory schema to + become outdated if TOASTable columns are dropped from the table. 
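The heartbeat properties described above are typically set together when the source database is low-traffic. A hedged sketch follows, reusing the `test_heartbeat_table` statement quoted in the `heartbeatActionQuery` description; the surrounding SGStream shape and the 30-second interval are assumptions, the property names and the default semantics are from the descriptions above.

```yaml
# Hypothetical sketch: periodic heartbeats plus an action query so the connector
# keeps acknowledging LSNs and the server can recycle old WAL files.
spec:
  source:
    sgCluster:
      debeziumProperties:
        heartbeatIntervalMs: 30000        # default 0 disables heartbeats
        heartbeatActionQuery: >-
          INSERT INTO test_heartbeat_table (text) VALUES ('test_heartbeat')
        schemaRefreshMode: columns_diff   # the safest mode, per the description above
```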
+ + ' + displayName: Source SGCluster Debezium Properties Schema Refresh Mode + path: source.sgCluster.debeziumProperties.schemaRefreshMode + - description: 'An interval in milliseconds that the connector should wait + before performing a snapshot when the connector starts. If you are starting + multiple connectors in a cluster, this property is useful for avoiding + snapshot interruptions, which might cause re-balancing of connectors. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Delay Ms + path: source.sgCluster.debeziumProperties.snapshotDelayMs + - description: 'Default `10240`. During a snapshot, the connector reads + table content in batches of rows. This property specifies the maximum + number of rows in a batch. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Fetch Size + path: source.sgCluster.debeziumProperties.snapshotFetchSize + - displayName: Source SGCluster Debezium Properties Slot Stream Params + path: source.sgCluster.debeziumProperties.slotStreamParams + - description: 'Default `6`. If connecting to a replication slot fails, + this is the maximum number of consecutive attempts to connect. + + ' + displayName: Source SGCluster Debezium Properties Slot Max Retries + path: source.sgCluster.debeziumProperties.slotMaxRetries + - description: 'Default `10000` (10 seconds). The number of milliseconds + to wait between retry attempts when the connector fails to connect to + a replication slot. + + ' + displayName: Source SGCluster Debezium Properties Slot Retry Delay Ms + path: source.sgCluster.debeziumProperties.slotRetryDelayMs + - description: 'Default `__debezium_unavailable_value`. Specifies the constant + that the connector provides to indicate that the original value is a + toasted value that is not provided by the database. If the setting of + unavailable.value.placeholder starts with the hex: prefix it is expected + that the rest of the string represents hexadecimally encoded octets. + For more information, see [toasted values](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-toasted-values). + + ' + displayName: Source SGCluster Debezium Properties Unavailable Value Placeholder + path: source.sgCluster.debeziumProperties.unavailableValuePlaceholder + - description: 'Default `false`. Determines whether the connector generates + events with transaction boundaries and enriches change event envelopes + with transaction metadata. Specify true if you want the connector to + do this. For more information, see [Transaction metadata](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-transaction-metadata). + + ' + displayName: Source SGCluster Debezium Properties Provide Transaction + Metadata + path: source.sgCluster.debeziumProperties.provideTransactionMetadata + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `true`. Determines whether the connector should + commit the LSN of the processed records in the source postgres database + so that the WAL logs can be deleted. Specify false if you don’t want + the connector to do this. Please note that if set to false LSN will + not be acknowledged by Debezium and as a result WAL logs will not be + cleared which might result in disk space issues. User is expected to + handle the acknowledgement of LSN outside Debezium. 
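As a hedged illustration of the `flushLsnSource` trade-off and the replication-slot retry settings just described, a small sketch follows; everything outside the documented property names and defaults is an assumption.

```yaml
# Hypothetical sketch: keep LSN flushing enabled (the default) unless offsets are
# acknowledged outside Debezium; disabling it lets WAL accumulate on the source.
spec:
  source:
    sgCluster:
      debeziumProperties:
        flushLsnSource: true       # set to false only if you acknowledge LSNs yourself
        slotMaxRetries: 6          # documented default
        slotRetryDelayMs: 10000    # documented default (10 seconds)
```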
+ + ' + displayName: Source SGCluster Debezium Properties Flush Lsn Source + path: source.sgCluster.debeziumProperties.flushLsnSource + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `10000` (10 seconds). The number of milliseconds + to wait before restarting a connector after a retriable error occurs. + + ' + displayName: Source SGCluster Debezium Properties Retriable Restart Connector + Wait Ms + path: source.sgCluster.debeziumProperties.retriableRestartConnectorWaitMs + - displayName: Source SGCluster Debezium Properties Skipped Operations + path: source.sgCluster.debeziumProperties.skippedOperations + - description: 'Fully-qualified name of the data collection that is used + to send signals to the connector. Use the following format to specify + the collection name: . + + ' + displayName: Source SGCluster Debezium Properties Signal Data Collection + path: source.sgCluster.debeziumProperties.signalDataCollection + - displayName: Source SGCluster Debezium Properties Signal Enabled Channels + path: source.sgCluster.debeziumProperties.signalEnabledChannels + - displayName: Source SGCluster Debezium Properties Notification Enabled + Channels + path: source.sgCluster.debeziumProperties.notificationEnabledChannels + - description: 'Default `1024`. The maximum number of rows that the connector + fetches and reads into memory during an incremental snapshot chunk. + Increasing the chunk size provides greater efficiency, because the snapshot + runs fewer snapshot queries of a greater size. However, larger chunk + sizes also require more memory to buffer the snapshot data. Adjust the + chunk size to a value that provides the best performance in your environment. + + ' + displayName: Source SGCluster Debezium Properties Incremental Snapshot + Chunk Size + path: source.sgCluster.debeziumProperties.incrementalSnapshotChunkSize + - description: 'Default `insert_insert`. Specifies the watermarking mechanism + that the connector uses during an incremental snapshot to deduplicate + events that might be captured by an incremental snapshot and then recaptured + after streaming resumes. + + + You can specify one of the following options: + + + * `insert_insert`: When you send a signal to initiate an incremental + snapshot, for every chunk that Debezium reads during the snapshot, it + writes an entry to the signaling data collection to record the signal + to open the snapshot window. After the snapshot completes, Debezium + inserts a second entry to record the closing of the window. + + * `insert_delete`: When you send a signal to initiate an incremental + snapshot, for every chunk that Debezium reads, it writes a single entry + to the signaling data collection to record the signal to open the snapshot + window. After the snapshot completes, this entry is removed. No entry + is created for the signal to close the snapshot window. Set this option + to prevent rapid growth of the signaling data collection. + + ' + displayName: Source SGCluster Debezium Properties Incremental Snapshot + Watermarking Strategy + path: source.sgCluster.debeziumProperties.incrementalSnapshotWatermarkingStrategy + - description: 'Default `0`. How often, in milliseconds, the XMIN will be + read from the replication slot. The XMIN value provides the lower bounds + of where a new replication slot could start from. The default value + of 0 disables tracking XMIN tracking. 
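The incremental-snapshot properties above are easier to read as a concrete configuration. A hedged sketch follows; the `public.debezium_signal` collection name is an illustrative assumption (the exact naming format is elided in the `signalDataCollection` description), while the chunk size and watermarking values reflect the documented options.

```yaml
# Hypothetical sketch: drive incremental snapshots from a signaling table and
# keep that table small by using the insert_delete watermarking strategy.
spec:
  source:
    sgCluster:
      debeziumProperties:
        signalDataCollection: public.debezium_signal        # assumed fully-qualified name
        incrementalSnapshotChunkSize: 4096                   # default 1024; larger chunks use more memory
        incrementalSnapshotWatermarkingStrategy: insert_delete
```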
+ + ' + displayName: Source SGCluster Debezium Properties Xmin Fetch Interval + Ms + path: source.sgCluster.debeziumProperties.xminFetchIntervalMs + - description: 'Default `io.debezium.schema.SchemaTopicNamingStrategy`. + The name of the TopicNamingStrategy class that should be used to determine + the topic name for data change, schema change, transaction, heartbeat + event etc., defaults to SchemaTopicNamingStrategy. + + ' + displayName: Source SGCluster Debezium Properties Topic Naming Strategy + path: source.sgCluster.debeziumProperties.topicNamingStrategy + - description: 'Default `.`. Specify the delimiter for topic name, defaults + to ".". + + ' + displayName: Source SGCluster Debezium Properties Topic Delimiter + path: source.sgCluster.debeziumProperties.topicDelimiter + - description: 'Default `10000`. The size used for holding the topic names + in bounded concurrent hash map. This cache will help to determine the + topic name corresponding to a given data collection. + + ' + displayName: Source SGCluster Debezium Properties Topic Cache Size + path: source.sgCluster.debeziumProperties.topicCacheSize + - description: 'Default `__debezium-heartbeat`. Controls the name of the + topic to which the connector sends heartbeat messages. For example, + if the topic prefix is fulfillment, the default topic name is __debezium-heartbeat.fulfillment. + + ' + displayName: Source SGCluster Debezium Properties Topic Heartbeat Prefix + path: source.sgCluster.debeziumProperties.topicHeartbeatPrefix + - description: 'Default `transaction`. Controls the name of the topic to + which the connector sends transaction metadata messages. For example, + if the topic prefix is fulfillment, the default topic name is fulfillment.transaction. + + ' + displayName: Source SGCluster Debezium Properties Topic Transaction + path: source.sgCluster.debeziumProperties.topicTransaction + - description: 'Default `1`. Specifies the number of threads that the connector + uses when performing an initial snapshot. To enable parallel initial + snapshots, set the property to a value greater than 1. In a parallel + initial snapshot, the connector processes multiple tables concurrently. + This feature is incubating. + + ' + displayName: Source SGCluster Debezium Properties Snapshot Max Threads + path: source.sgCluster.debeziumProperties.snapshotMaxThreads + - displayName: Source SGCluster Debezium Properties Custom Metric Tags + path: source.sgCluster.debeziumProperties.customMetricTags + - description: 'Default `-1`. Specifies how the connector responds after + an operation that results in a retriable error, such as a connection + error. + + + Set one of the following options: + + + * `-1`: No limit. The connector always restarts automatically, and retries + the operation, regardless of the number of previous failures. + + * `0`: Disabled. The connector fails immediately, and never retries + the operation. User intervention is required to restart the connector. + + * `> 0`: The connector restarts automatically until it reaches the specified + maximum number of retries. After the next failure, the connector stops, + and user intervention is required to restart it. + + ' + displayName: Source SGCluster Debezium Properties Errors Max Retries + path: source.sgCluster.debeziumProperties.errorsMaxRetries + - description: 'The hostname of the Postgres instance. + + ' + displayName: Source Postgres Host + path: source.postgres.host + - description: 'The port of the Postgres instance. When not specified port + 5432 will be used. 
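The `source.postgres` connection fields described here (host, port, database, and Secret references for credentials) can be pictured as follows. This is a hedged sketch: the apiVersion, kind, `source.type` value, hostname, database name, and the Secret name and keys are assumptions for illustration; only the field paths come from this manifest.

```yaml
# Hypothetical sketch of an SGStream reading from an external Postgres instance.
apiVersion: stackgres.io/v1alpha1   # assumed API version
kind: SGStream                      # assumed kind
metadata:
  name: external-pg-stream          # illustrative name
spec:
  source:
    type: Postgres                  # assumed discriminator value
    postgres:
      host: pg.example.com          # illustrative hostname
      port: 5432                    # optional; defaults to 5432 per the description
      database: appdb               # optional; defaults to postgres per the description
      username:
        name: external-pg-credentials   # Secret name (assumed)
        key: username                   # Secret key (assumed)
      password:
        name: external-pg-credentials
        key: password
```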
+ + ' + displayName: Source Postgres Port + path: source.postgres.port + - description: 'The target database name to which the CDC process will connect + to. + + + If not specified the default postgres database will be targeted. + + ' + displayName: Source Postgres Database + path: source.postgres.database + - description: 'The Secret name where the username is stored. + + ' + displayName: Source Postgres Username Name + path: source.postgres.username.name + - description: 'The Secret key where the username is stored. + + ' + displayName: Source Postgres Username Key + path: source.postgres.username.key + - description: 'The Secret name where the password is stored. + + ' + displayName: Source Postgres Password Name + path: source.postgres.password.name + - description: 'The Secret key where the password is stored. + + ' + displayName: Source Postgres Password Key + path: source.postgres.password.key + - description: 'A regular expressions that allow to match one or more `.
.` + entries to be filtered before sending to the target. + + ' + displayName: Source Postgres Includes + path: source.postgres.includes + - description: 'A regular expressions that allow to match one or more `.
.` + entries to be filtered out before sending to the target. + + ' + displayName: Source Postgres Excludes + path: source.postgres.excludes + - description: 'Default `pgoutput`. The name of the [PostgreSQL logical + decoding plug-in](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-output-plugin) + installed on the PostgreSQL server. Supported values are decoderbufs, + and pgoutput. + + ' + displayName: Source Postgres Debezium Properties Plugin Name + path: source.postgres.debeziumProperties.pluginName + - description: 'Default . (with all characters + that are not `[a-zA-Z0-9]` changed to `_` character). The name of the + PostgreSQL logical decoding slot that was created for streaming changes + from a particular plug-in for a particular database/schema. The server + uses this slot to stream events to the Debezium connector that you are + configuring. + + + Slot names must conform to [PostgreSQL replication slot naming rules](https://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION-SLOTS-MANIPULATION), + which state: "Each replication slot has a name, which can contain lower-case + letters, numbers, and the underscore character." + + ' + displayName: Source Postgres Debezium Properties Slot Name + path: source.postgres.debeziumProperties.slotName + - description: 'Default `true`. Whether or not to delete the logical replication + slot when the connector stops in a graceful, expected way. The default + behavior is that the replication slot remains configured for the connector + when the connector stops. When the connector restarts, having the same + replication slot enables the connector to start processing where it + left off. Set to true in only testing or development environments. Dropping + the slot allows the database to discard WAL segments. When the connector + restarts it performs a new snapshot or it can continue from a persistent + offset in the Kafka Connect offsets topic. + + ' + displayName: Source Postgres Debezium Properties Slot Drop On Stop + path: source.postgres.debeziumProperties.slotDropOnStop + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default . (with all characters + that are not `[a-zA-Z0-9]` changed to `_` character). The name of the + PostgreSQL publication created for streaming changes when using pgoutput. + This publication is created at start-up if it does not already exist + and it includes all tables. Debezium then applies its own include/exclude + list filtering, if configured, to limit the publication to change events + for the specific tables of interest. The connector user must have superuser + permissions to create this publication, so it is usually preferable + to create the publication before starting the connector for the first + time. If the publication already exists, either for all tables or configured + with a subset of tables, Debezium uses the publication as it is defined. + + ' + displayName: Source Postgres Debezium Properties Publication Name + path: source.postgres.debeziumProperties.publicationName + - description: 'Default `false`. Specifies whether to skip publishing messages + when there is no change in included columns. This would essentially + filter messages if there is no change in columns included as per includes + or excludes fields. 
Note: Only works when REPLICA IDENTITY of the table + is set to FULL + + ' + displayName: Source Postgres Debezium Properties Skip Messages Without + Change + path: source.postgres.debeziumProperties.skipMessagesWithoutChange + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `adaptive`. Time, date, and timestamps can be represented + with different kinds of precision: + + + * `adaptive`: captures the time and timestamp values exactly as in the + database using either millisecond, microsecond, or nanosecond precision + values based on the database column’s type. + + * `adaptive_time_microseconds`: captures the date, datetime and timestamp + values exactly as in the database using either millisecond, microsecond, + or nanosecond precision values based on the database column’s type. + An exception is TIME type fields, which are always captured as microseconds. + + * `connect`: always represents time and timestamp values by using Kafka + Connect’s built-in representations for Time, Date, and Timestamp, which + use millisecond precision regardless of the database columns'' precision. + For more information, see [temporal values](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-temporal-types). + + ' + displayName: Source Postgres Debezium Properties Time Precision Mode + path: source.postgres.debeziumProperties.timePrecisionMode + - description: 'Default `precise`. Specifies how the connector should handle + values for DECIMAL and NUMERIC columns: + + + * `precise`: represents values by using java.math.BigDecimal to represent + values in binary form in change events. + + * `double`: represents values by using double values, which might result + in a loss of precision but which is easier to use. + + * `string`: encodes values as formatted strings, which are easy to consume + but semantic information about the real type is lost. For more information, + see [Decimal types](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-decimal-types). + + ' + displayName: Source Postgres Debezium Properties Decimal Handling Mode + path: source.postgres.debeziumProperties.decimalHandlingMode + - description: 'Default `json`. Specifies how the connector should handle + values for hstore columns: + + + * `map`: represents values by using MAP. + + * `json`: represents values by using json string. This setting encodes + values as formatted strings such as {"key" : "val"}. For more information, + see [PostgreSQL HSTORE type](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-hstore-type). + + ' + displayName: Source Postgres Debezium Properties Hstore Handling Mode + path: source.postgres.debeziumProperties.hstoreHandlingMode + - description: "Default `numeric`. Specifies how the connector should handle\ + \ values for interval columns:\n\n * `numeric`: represents intervals\ + \ using approximate number of microseconds.\n * `string`: represents\ + \ intervals exactly by using the string pattern representation PYMDTHMS.\ + \ For example: P1Y2M3DT4H5M6.78S. For more information, see [PostgreSQL\ + \ basic types](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-basic-types).\n" + displayName: Source Postgres Debezium Properties Interval Handling Mode + path: source.postgres.debeziumProperties.intervalHandlingMode + - description: 'Default `true`. Controls whether a delete event is followed + by a tombstone event. 
+ + + * `true` - a delete operation is represented by a delete event and a + subsequent tombstone event. + + * `false` - only a delete event is emitted. + + + After a source record is deleted, emitting a tombstone event (the default + behavior) allows Kafka to completely delete all events that pertain + to the key of the deleted row in case [log compaction](https://kafka.apache.org/documentation/#compaction) + is enabled for the topic. + + ' + displayName: Source Postgres Debezium Properties Tombstones On Delete + path: source.postgres.debeziumProperties.tombstonesOnDelete + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Source Postgres Debezium Properties Column Truncate To Length + Chars + path: source.postgres.debeziumProperties.columnTruncateToLengthChars + - displayName: Source Postgres Debezium Properties Column Mask With Length + Chars + path: source.postgres.debeziumProperties.columnMaskWithLengthChars + - displayName: Source Postgres Debezium Properties Column Mask Hash + path: source.postgres.debeziumProperties.columnMaskHash + - displayName: Source Postgres Debezium Properties Column Mask Hash V2 + path: source.postgres.debeziumProperties.columnMaskHashV2 + - displayName: Source Postgres Debezium Properties Column Propagate Source + Type + path: source.postgres.debeziumProperties.columnPropagateSourceType + - displayName: Source Postgres Debezium Properties Datatype Propagate Source + Type + path: source.postgres.debeziumProperties.datatypePropagateSourceType + - displayName: Source Postgres Debezium Properties Message Key Columns + path: source.postgres.debeziumProperties.messageKeyColumns + - description: 'Default `all_tables`. Applies only when streaming changes + by using [the pgoutput plug-in](https://www.postgresql.org/docs/current/sql-createpublication.html). + The setting determines how creation of a [publication](https://www.postgresql.org/docs/current/logical-replication-publication.html) + should work. Specify one of the following values: + + + * `all_tables` - If a publication exists, the connector uses it. If + a publication does not exist, the connector creates a publication for + all tables in the database for which the connector is capturing changes. + For the connector to create a publication it must access the database + through a database user account that has permission to create publications + and perform replications. You grant the required permission by using + the following SQL command CREATE PUBLICATION FOR + ALL TABLES;. + + * `disabled` - The connector does not attempt to create a publication. + A database administrator or the user configured to perform replications + must have created the publication before running the connector. If the + connector cannot find the publication, the connector throws an exception + and stops. + + * `filtered` - If a publication exists, the connector uses it. If no + publication exists, the connector creates a new publication for tables + that match the current filter configuration as specified by the schema.include.list, + schema.exclude.list, and table.include.list, and table.exclude.list + connector configuration properties. For example: CREATE PUBLICATION + FOR TABLE . If the publication + exists, the connector updates the publication for tables that match + the current filter configuration. For example: ALTER PUBLICATION + SET TABLE . 
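For the case where a database administrator pre-creates the publication, the slot and publication naming properties described above combine naturally with `publicationAutocreateMode: disabled`. A hedged sketch follows; the slot and publication names are illustrative assumptions, the property names and defaults are from the descriptions above.

```yaml
# Hypothetical sketch: point the connector at a pre-created replication slot and
# publication, and fail fast if the publication is missing.
spec:
  source:
    postgres:
      debeziumProperties:
        slotName: appdb_stream_slot          # lower-case letters, digits and underscores only
        slotDropOnStop: false                # keep the slot across restarts (the default)
        publicationName: appdb_stream_pub    # assumed, created beforehand by a DBA
        publicationAutocreateMode: disabled
```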
+ + ' + displayName: Source Postgres Debezium Properties Publication Autocreate + Mode + path: source.postgres.debeziumProperties.publicationAutocreateMode + - displayName: Source Postgres Debezium Properties Replica Identity Autoset + Values + path: source.postgres.debeziumProperties.replicaIdentityAutosetValues + - description: 'Default `bytes`. Specifies how binary (bytea) columns should + be represented in change events: + + + * `bytes` represents binary data as byte array. + + * `base64` represents binary data as base64-encoded strings. + + * `base64-url-safe` represents binary data as base64-url-safe-encoded + strings. + + * `hex` represents binary data as hex-encoded (base16) strings. + + ' + displayName: Source Postgres Debezium Properties Binary Handling Mode + path: source.postgres.debeziumProperties.binaryHandlingMode + - description: 'Default `none`. Specifies how schema names should be adjusted + for compatibility with the message converter used by the connector. + Possible settings: + + + * `none` does not apply any adjustment. + + * `avro` replaces the characters that cannot be used in the Avro type + name with underscore. + + * `avro_unicode` replaces the underscore or characters that cannot be + used in the Avro type name with corresponding unicode like _uxxxx. Note: + _ is an escape sequence like backslash in Java + + ' + displayName: Source Postgres Debezium Properties Schema Name Adjustment + Mode + path: source.postgres.debeziumProperties.schemaNameAdjustmentMode + - description: 'Default `none`. Specifies how field names should be adjusted + for compatibility with the message converter used by the connector. + Possible settings: + + + * `none` does not apply any adjustment. + + * `avro` replaces the characters that cannot be used in the Avro type + name with underscore. + + * `avro_unicode` replaces the underscore or characters that cannot be + used in the Avro type name with corresponding unicode like _uxxxx. Note: + _ is an escape sequence like backslash in Java + + + For more information, see [Avro naming](https://debezium.io/documentation/reference/stable/configuration/avro.html#avro-naming). + + ' + displayName: Source Postgres Debezium Properties Field Name Adjustment + Mode + path: source.postgres.debeziumProperties.fieldNameAdjustmentMode + - description: 'Default `2`. Specifies how many decimal digits should be + used when converting Postgres money type to java.math.BigDecimal, which + represents the values in change events. Applicable only when decimalHandlingMode + is set to precise. + + ' + displayName: Source Postgres Debezium Properties Money Fraction Digits + path: source.postgres.debeziumProperties.moneyFractionDigits + - displayName: Source Postgres Debezium Properties Converters + path: source.postgres.debeziumProperties.converters + - description: "Default `initial`. Specifies the criteria for performing\ + \ a snapshot when the connector starts:\n\n* `always` - The connector\ + \ performs a snapshot every time that it starts. The snapshot includes\ + \ the structure and data of the captured tables. Specify this value\ + \ to populate topics with a complete representation of the data from\ + \ the captured tables every time that the connector starts. 
After the\ + \ snapshot completes, the connector begins to stream event records for\ + \ subsequent database changes.\n* `initial` - The connector performs\ + \ a snapshot only when no offsets have been recorded for the logical\ + \ server name.\n* `initial_only` - The connector performs an initial\ + \ snapshot and then stops, without processing any subsequent changes.\n\ + * `no_data` - The connector never performs snapshots. When a connector\ + \ is configured this way, after it starts, it behaves as follows: If\ + \ there is a previously stored LSN in the Kafka offsets topic, the connector\ + \ continues streaming changes from that position. If no LSN is stored,\ + \ the connector starts streaming changes from the point in time when\ + \ the PostgreSQL logical replication slot was created on the server.\ + \ Use this snapshot mode only when you know all data of interest is\ + \ still reflected in the WAL.\n* `never` - Deprecated see no_data.\n\ + * `when_needed` - After the connector starts, it performs a snapshot\ + \ only if it detects one of the following circumstances: \n It cannot\ + \ detect any topic offsets.\n A previously recorded offset specifies\ + \ a log position that is not available on the server.\n* `configuration_based`\ + \ - With this option, you control snapshot behavior through a set of\ + \ connector properties that have the prefix 'snapshotModeConfigurationBased'.\n\ + * `custom` - The connector performs a snapshot according to the implementation\ + \ specified by the snapshotModeCustomName property, which defines a\ + \ custom implementation of the io.debezium.spi.snapshot.Snapshotter\ + \ interface.\n\nFor more information, see the [table of snapshot.mode\ + \ options](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-connector-snapshot-mode-options).\n" + displayName: Source Postgres Debezium Properties Snapshot Mode + path: source.postgres.debeziumProperties.snapshotMode + - description: 'Default `false`. If the snapshotMode is set to configuration_based, + set this property to specify whether the connector includes table data + when it performs a snapshot. + + ' + displayName: Source Postgres Debezium Properties Snapshot Mode Configuration + Based Snapshot Data + path: source.postgres.debeziumProperties.snapshotModeConfigurationBasedSnapshotData + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `false`. If the snapshotMode is set to configuration_based, + set this property to specify whether the connector includes the table + schema when it performs a snapshot. + + ' + displayName: Source Postgres Debezium Properties Snapshot Mode Configuration + Based Snapshot Schema + path: source.postgres.debeziumProperties.snapshotModeConfigurationBasedSnapshotSchema + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `false`. If the snapshotMode is set to configuration_based, + set this property to specify whether the connector begins to stream + change events after a snapshot completes. + + ' + displayName: Source Postgres Debezium Properties Snapshot Mode Configuration + Based Start Stream + path: source.postgres.debeziumProperties.snapshotModeConfigurationBasedStartStream + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `false`. 
If the snapshotMode is set to configuration_based, + set this property to specify whether the connector includes table schema + in a snapshot if the schema history topic is not available. + + ' + displayName: Source Postgres Debezium Properties Snapshot Mode Configuration + Based Snapshot On Schema Error + path: source.postgres.debeziumProperties.snapshotModeConfigurationBasedSnapshotOnSchemaError + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `false`. If the snapshotMode is set to configuration_based, + this property specifies whether the connector attempts to snapshot table + data if it does not find the last committed offset in the transaction + log. Set the value to true to instruct the connector to perform a new + snapshot. + + ' + displayName: Source Postgres Debezium Properties Snapshot Mode Configuration + Based Snapshot On Data Error + path: source.postgres.debeziumProperties.snapshotModeConfigurationBasedSnapshotOnDataError + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'When snapshotMode is set as custom, use this setting to + specify the name of the custom implementation provided in the name() + method that is defined by the ''io.debezium.spi.snapshot.Snapshotter'' + interface. The provided implementation is called after a connector restart + to determine whether to perform a snapshot. For more information, see + [custom snapshotter SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + displayName: Source Postgres Debezium Properties Snapshot Mode Custom + Name + path: source.postgres.debeziumProperties.snapshotModeCustomName + - description: 'Default `none`. Specifies how the connector holds locks + on tables while performing a schema snapshot. Set one of the following + options: + + + * `shared`: The connector holds a table lock that prevents exclusive + table access during the initial portion phase of the snapshot in which + database schemas and other metadata are read. After the initial phase, + the snapshot no longer requires table locks. + + * `none`: The connector avoids locks entirely. Do not use this mode + if schema changes might occur during the snapshot. + + + > *WARNING*: Do not use this mode if schema changes might occur during + the snapshot. + + + * `custom`: The connector performs a snapshot according to the implementation + specified by the snapshotLockingModeCustomName property, which is a + custom implementation of the io.debezium.spi.snapshot.SnapshotLock interface. + + ' + displayName: Source Postgres Debezium Properties Snapshot Locking Mode + path: source.postgres.debeziumProperties.snapshotLockingMode + - description: 'When snapshotLockingMode is set to custom, use this setting + to specify the name of the custom implementation provided in the name() + method that is defined by the ''io.debezium.spi.snapshot.SnapshotLock'' + interface. For more information, see [custom snapshotter SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + displayName: Source Postgres Debezium Properties Snapshot Locking Mode + Custom Name + path: source.postgres.debeziumProperties.snapshotLockingModeCustomName + - description: 'Default `select_all`. Specifies how the connector queries + data while performing a snapshot. 
Set one of the following options: + + + * `select_all`: The connector performs a select all query by default, + optionally adjusting the columns selected based on the column include + and exclude list configurations. + + * `custom`: The connector performs a snapshot query according to the + implementation specified by the snapshotQueryModeCustomName property, + which defines a custom implementation of the io.debezium.spi.snapshot.SnapshotQuery + interface. This setting enables you to manage snapshot content in a + more flexible manner compared to using the snapshotSelectStatementOverrides + property. + + ' + displayName: Source Postgres Debezium Properties Snapshot Query Mode + path: source.postgres.debeziumProperties.snapshotQueryMode + - description: 'When snapshotQueryMode is set as custom, use this setting + to specify the name of the custom implementation provided in the name() + method that is defined by the ''io.debezium.spi.snapshot.SnapshotQuery'' + interface. For more information, see [custom snapshotter SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + displayName: Source Postgres Debezium Properties Snapshot Query Mode Custom + Name + path: source.postgres.debeziumProperties.snapshotQueryModeCustomName + - displayName: Source Postgres Debezium Properties Snapshot Include Collection + List + path: source.postgres.debeziumProperties.snapshotIncludeCollectionList + - description: 'Default `10000`. Positive integer value that specifies the + maximum amount of time (in milliseconds) to wait to obtain table locks + when performing a snapshot. If the connector cannot acquire table locks + in this time interval, the snapshot fails. [How the connector performs + snapshots](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-snapshots) + provides details. + + ' + displayName: Source Postgres Debezium Properties Snapshot Lock Timeout + Ms + path: source.postgres.debeziumProperties.snapshotLockTimeoutMs + - displayName: Source Postgres Debezium Properties Snapshot Select Statement + Overrides + path: source.postgres.debeziumProperties.snapshotSelectStatementOverrides + - description: 'Default `fail`. Specifies how the connector should react + to exceptions during processing of events: + + + * `fail`: propagates the exception, indicates the offset of the problematic + event, and causes the connector to stop. + + * `warn`: logs the offset of the problematic event, skips that event, + and continues processing. + + * `skip`: skips the problematic event and continues processing. + + ' + displayName: Source Postgres Debezium Properties Event Processing Failure + Handling Mode + path: source.postgres.debeziumProperties.eventProcessingFailureHandlingMode + - description: 'Default `2048`. Positive integer value that specifies the + maximum size of each batch of events that the connector processes. + + ' + displayName: Source Postgres Debezium Properties Max Batch Size + path: source.postgres.debeziumProperties.maxBatchSize + - description: 'Default `8192`. Positive integer value that specifies the + maximum number of records that the blocking queue can hold. When Debezium + reads events streamed from the database, it places the events in the + blocking queue before it writes them to Kafka. 
The blocking queue can + provide backpressure for reading change events from the database in + cases where the connector ingests messages faster than it can write + them to Kafka, or when Kafka becomes unavailable. Events that are held + in the queue are disregarded when the connector periodically records + offsets. Always set the value of maxQueueSize to be larger than the + value of maxBatchSize. + + ' + displayName: Source Postgres Debezium Properties Max Queue Size + path: source.postgres.debeziumProperties.maxQueueSize + - description: "Default `0`. A long integer value that specifies the maximum\ + \ volume of the blocking queue in bytes. By default, volume limits are\ + \ not specified for the blocking queue. To specify the number of bytes\ + \ that the queue can consume, set this property to a positive long value.\n\ + \ If maxQueueSize is also set, writing to the queue is blocked when\ + \ the size of the queue reaches the limit specified by either property.\ + \ For example, if you set maxQueueSize=1000, and maxQueueSizeInBytes=5000,\ + \ writing to the queue is blocked after the queue contains 1000 records,\ + \ or after the volume of the records in the queue reaches 5000 bytes.\n" + displayName: Source Postgres Debezium Properties Max Queue Size In Bytes + path: source.postgres.debeziumProperties.maxQueueSizeInBytes + - description: 'Default `500`. Positive integer value that specifies the + number of milliseconds the connector should wait for new change events + to appear before it starts processing a batch of events. Defaults to + 500 milliseconds. + + ' + displayName: Source Postgres Debezium Properties Poll Interval Ms + path: source.postgres.debeziumProperties.pollIntervalMs + - description: "Default `true`. Specifies connector behavior when the connector\ + \ encounters a field whose data type is unknown. The default behavior\ + \ is that the connector omits the field from the change event and logs\ + \ a warning.\n Set this property to true if you want the change event\ + \ to contain an opaque binary representation of the field. This lets\ + \ consumers decode the field. You can control the exact representation\ + \ by setting the binaryHandlingMode property.\n> *NOTE*: Consumers risk\ + \ backward compatibility issues when `includeUnknownDatatypes` is set\ + \ to `true`. Not only may the database-specific binary representation\ + \ change between releases, but if the data type is eventually supported\ + \ by Debezium, the data type will be sent downstream in a logical type,\ + \ which would require adjustments by consumers. In general, when encountering\ + \ unsupported data types, create a feature request so that support can\ + \ be added.\n" + displayName: Source Postgres Debezium Properties Include Unknown Datatypes + path: source.postgres.debeziumProperties.includeUnknownDatatypes + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - displayName: Source Postgres Debezium Properties Database Initial Statements + path: source.postgres.debeziumProperties.databaseInitialStatements + - description: 'Default `10000`. Frequency for sending replication connection + status updates to the server, given in milliseconds. The property also + controls how frequently the database status is checked to detect a dead + connection in case the database was shut down. + + ' + displayName: Source Postgres Debezium Properties Status Update Interval + Ms + path: source.postgres.debeziumProperties.statusUpdateIntervalMs + - description: "Default `0`. 
Controls how frequently the connector sends\ + \ heartbeat messages to a Kafka topic. The default behavior is that\ + \ the connector does not send heartbeat messages.\n Heartbeat messages\ + \ are useful for monitoring whether the connector is receiving change\ + \ events from the database. Heartbeat messages might help decrease the\ + \ number of change events that need to be re-sent when a connector restarts.\ + \ To send heartbeat messages, set this property to a positive integer,\ + \ which indicates the number of milliseconds between heartbeat messages.\n\ + \ Heartbeat messages are needed when there are many updates in a database\ + \ that is being tracked but only a tiny number of updates are related\ + \ to the table(s) and schema(s) for which the connector is capturing\ + \ changes. In this situation, the connector reads from the database\ + \ transaction log as usual but rarely emits change records to Kafka.\ + \ This means that no offset updates are committed to Kafka and the connector\ + \ does not have an opportunity to send the latest retrieved LSN to the\ + \ database. The database retains WAL files that contain events that\ + \ have already been processed by the connector. Sending heartbeat messages\ + \ enables the connector to send the latest retrieved LSN to the database,\ + \ which allows the database to reclaim disk space being used by no longer\ + \ needed WAL files.\n" + displayName: Source Postgres Debezium Properties Heartbeat Interval Ms + path: source.postgres.debeziumProperties.heartbeatIntervalMs + - description: "Specifies a query that the connector executes on the source\ + \ database when the connector sends a heartbeat message.\n This is useful\ + \ for resolving the situation described in [WAL disk space consumption](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-wal-disk-space),\ + \ where capturing changes from a low-traffic database on the same host\ + \ as a high-traffic database prevents Debezium from processing WAL records\ + \ and thus acknowledging WAL positions with the database. To address\ + \ this situation, create a heartbeat table in the low-traffic database,\ + \ and set this property to a statement that inserts records into that\ + \ table, for example:\n\n ```\n INSERT INTO test_heartbeat_table (text)\ + \ VALUES ('test_heartbeat')\n ```\n \n This allows the connector to\ + \ receive changes from the low-traffic database and acknowledge their\ + \ LSNs, which prevents unbounded WAL growth on the database host.\n" + displayName: Source Postgres Debezium Properties Heartbeat Action Query + path: source.postgres.debeziumProperties.heartbeatActionQuery + - description: 'Default `columns_diff`. Specify the conditions that trigger + a refresh of the in-memory schema for a table. + + + * `columns_diff`: is the safest mode. It ensures that the in-memory + schema stays in sync with the database table’s schema at all times. + + * `columns_diff_exclude_unchanged_toast`: instructs the connector to + refresh the in-memory schema cache if there is a discrepancy with the + schema derived from the incoming message, unless unchanged TOASTable + data fully accounts for the discrepancy. + + + This setting can significantly improve connector performance if there + are frequently-updated tables that have TOASTed data that are rarely + part of updates. However, it is possible for the in-memory schema to + become outdated if TOASTable columns are dropped from the table. 
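The batching and queue-sizing properties described a few entries above (`maxBatchSize`, `maxQueueSize`, `maxQueueSizeInBytes`, `pollIntervalMs`) are easier to see side by side. A hedged sketch follows, keeping `maxQueueSize` larger than `maxBatchSize` as the description requires; the byte cap is an illustrative assumption, the other values are the documented defaults.

```yaml
# Hypothetical sketch: size the blocking queue above the batch size and cap its
# memory footprint.
spec:
  source:
    postgres:
      debeziumProperties:
        maxBatchSize: 2048              # documented default
        maxQueueSize: 8192              # must be larger than maxBatchSize
        maxQueueSizeInBytes: 134217728  # optional byte cap (128 MiB); 0 = unlimited (default)
        pollIntervalMs: 500             # documented default wait before processing a batch
```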
+ + ' + displayName: Source Postgres Debezium Properties Schema Refresh Mode + path: source.postgres.debeziumProperties.schemaRefreshMode + - description: 'An interval in milliseconds that the connector should wait + before performing a snapshot when the connector starts. If you are starting + multiple connectors in a cluster, this property is useful for avoiding + snapshot interruptions, which might cause re-balancing of connectors. + + ' + displayName: Source Postgres Debezium Properties Snapshot Delay Ms + path: source.postgres.debeziumProperties.snapshotDelayMs + - description: 'Default `10240`. During a snapshot, the connector reads + table content in batches of rows. This property specifies the maximum + number of rows in a batch. + + ' + displayName: Source Postgres Debezium Properties Snapshot Fetch Size + path: source.postgres.debeziumProperties.snapshotFetchSize + - displayName: Source Postgres Debezium Properties Slot Stream Params + path: source.postgres.debeziumProperties.slotStreamParams + - description: 'Default `6`. If connecting to a replication slot fails, + this is the maximum number of consecutive attempts to connect. + + ' + displayName: Source Postgres Debezium Properties Slot Max Retries + path: source.postgres.debeziumProperties.slotMaxRetries + - description: 'Default `10000` (10 seconds). The number of milliseconds + to wait between retry attempts when the connector fails to connect to + a replication slot. + + ' + displayName: Source Postgres Debezium Properties Slot Retry Delay Ms + path: source.postgres.debeziumProperties.slotRetryDelayMs + - description: 'Default `__debezium_unavailable_value`. Specifies the constant + that the connector provides to indicate that the original value is a + toasted value that is not provided by the database. If the setting of + unavailable.value.placeholder starts with the hex: prefix it is expected + that the rest of the string represents hexadecimally encoded octets. + For more information, see [toasted values](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-toasted-values). + + ' + displayName: Source Postgres Debezium Properties Unavailable Value Placeholder + path: source.postgres.debeziumProperties.unavailableValuePlaceholder + - description: 'Default `false`. Determines whether the connector generates + events with transaction boundaries and enriches change event envelopes + with transaction metadata. Specify true if you want the connector to + do this. For more information, see [Transaction metadata](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-transaction-metadata). + + ' + displayName: Source Postgres Debezium Properties Provide Transaction Metadata + path: source.postgres.debeziumProperties.provideTransactionMetadata + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `true`. Determines whether the connector should + commit the LSN of the processed records in the source postgres database + so that the WAL logs can be deleted. Specify false if you don’t want + the connector to do this. Please note that if set to false LSN will + not be acknowledged by Debezium and as a result WAL logs will not be + cleared which might result in disk space issues. User is expected to + handle the acknowledgement of LSN outside Debezium. 
+ + ' + displayName: Source Postgres Debezium Properties Flush Lsn Source + path: source.postgres.debeziumProperties.flushLsnSource + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `10000` (10 seconds). The number of milliseconds + to wait before restarting a connector after a retriable error occurs. + + ' + displayName: Source Postgres Debezium Properties Retriable Restart Connector + Wait Ms + path: source.postgres.debeziumProperties.retriableRestartConnectorWaitMs + - displayName: Source Postgres Debezium Properties Skipped Operations + path: source.postgres.debeziumProperties.skippedOperations + - description: 'Fully-qualified name of the data collection that is used + to send signals to the connector. Use the following format to specify + the collection name: . + + ' + displayName: Source Postgres Debezium Properties Signal Data Collection + path: source.postgres.debeziumProperties.signalDataCollection + - displayName: Source Postgres Debezium Properties Signal Enabled Channels + path: source.postgres.debeziumProperties.signalEnabledChannels + - displayName: Source Postgres Debezium Properties Notification Enabled + Channels + path: source.postgres.debeziumProperties.notificationEnabledChannels + - description: 'Default `1024`. The maximum number of rows that the connector + fetches and reads into memory during an incremental snapshot chunk. + Increasing the chunk size provides greater efficiency, because the snapshot + runs fewer snapshot queries of a greater size. However, larger chunk + sizes also require more memory to buffer the snapshot data. Adjust the + chunk size to a value that provides the best performance in your environment. + + ' + displayName: Source Postgres Debezium Properties Incremental Snapshot + Chunk Size + path: source.postgres.debeziumProperties.incrementalSnapshotChunkSize + - description: 'Default `insert_insert`. Specifies the watermarking mechanism + that the connector uses during an incremental snapshot to deduplicate + events that might be captured by an incremental snapshot and then recaptured + after streaming resumes. + + + You can specify one of the following options: + + + * `insert_insert`: When you send a signal to initiate an incremental + snapshot, for every chunk that Debezium reads during the snapshot, it + writes an entry to the signaling data collection to record the signal + to open the snapshot window. After the snapshot completes, Debezium + inserts a second entry to record the closing of the window. + + * `insert_delete`: When you send a signal to initiate an incremental + snapshot, for every chunk that Debezium reads, it writes a single entry + to the signaling data collection to record the signal to open the snapshot + window. After the snapshot completes, this entry is removed. No entry + is created for the signal to close the snapshot window. Set this option + to prevent rapid growth of the signaling data collection. + + ' + displayName: Source Postgres Debezium Properties Incremental Snapshot + Watermarking Strategy + path: source.postgres.debeziumProperties.incrementalSnapshotWatermarkingStrategy + - description: 'Default `0`. How often, in milliseconds, the XMIN will be + read from the replication slot. The XMIN value provides the lower bounds + of where a new replication slot could start from. The default value + of 0 disables tracking XMIN tracking. 
+ + ' + displayName: Source Postgres Debezium Properties Xmin Fetch Interval Ms + path: source.postgres.debeziumProperties.xminFetchIntervalMs + - description: 'Default `io.debezium.schema.SchemaTopicNamingStrategy`. + The name of the TopicNamingStrategy class that should be used to determine + the topic name for data change, schema change, transaction, heartbeat + event etc., defaults to SchemaTopicNamingStrategy. + + ' + displayName: Source Postgres Debezium Properties Topic Naming Strategy + path: source.postgres.debeziumProperties.topicNamingStrategy + - description: 'Default `.`. Specify the delimiter for topic name, defaults + to ".". + + ' + displayName: Source Postgres Debezium Properties Topic Delimiter + path: source.postgres.debeziumProperties.topicDelimiter + - description: 'Default `10000`. The size used for holding the topic names + in bounded concurrent hash map. This cache will help to determine the + topic name corresponding to a given data collection. + + ' + displayName: Source Postgres Debezium Properties Topic Cache Size + path: source.postgres.debeziumProperties.topicCacheSize + - description: 'Default `__debezium-heartbeat`. Controls the name of the + topic to which the connector sends heartbeat messages. For example, + if the topic prefix is fulfillment, the default topic name is __debezium-heartbeat.fulfillment. + + ' + displayName: Source Postgres Debezium Properties Topic Heartbeat Prefix + path: source.postgres.debeziumProperties.topicHeartbeatPrefix + - description: 'Default `transaction`. Controls the name of the topic to + which the connector sends transaction metadata messages. For example, + if the topic prefix is fulfillment, the default topic name is fulfillment.transaction. + + ' + displayName: Source Postgres Debezium Properties Topic Transaction + path: source.postgres.debeziumProperties.topicTransaction + - description: 'Default `1`. Specifies the number of threads that the connector + uses when performing an initial snapshot. To enable parallel initial + snapshots, set the property to a value greater than 1. In a parallel + initial snapshot, the connector processes multiple tables concurrently. + This feature is incubating. + + ' + displayName: Source Postgres Debezium Properties Snapshot Max Threads + path: source.postgres.debeziumProperties.snapshotMaxThreads + - displayName: Source Postgres Debezium Properties Custom Metric Tags + path: source.postgres.debeziumProperties.customMetricTags + - description: 'Default `-1`. Specifies how the connector responds after + an operation that results in a retriable error, such as a connection + error. + + + Set one of the following options: + + + * `-1`: No limit. The connector always restarts automatically, and retries + the operation, regardless of the number of previous failures. + + * `0`: Disabled. The connector fails immediately, and never retries + the operation. User intervention is required to restart the connector. + + * `> 0`: The connector restarts automatically until it reaches the specified + maximum number of retries. After the next failure, the connector stops, + and user intervention is required to restart it. + + ' + displayName: Source Postgres Debezium Properties Errors Max Retries + path: source.postgres.debeziumProperties.errorsMaxRetries + - description: 'Indicate the type of target of this stream. Possible values + are: + + + * `CloudEvent`: events will be sent to a cloud event receiver. 
+ + * `PgLambda`: events will trigger the execution of a lambda script by + integrating with [Knative Service](https://knative.dev/docs/serving/) + (Knative must be already installed). + + * `SGCluster`: events will be sinked to an SGCluster allowing migration + of data. + + ' + displayName: Target Type + path: target.type + - description: 'The CloudEvent format (json by default). + + + Only json is supported at the moment. + + ' + displayName: Target Cloud Event Format + path: target.cloudEvent.format + - description: 'The CloudEvent binding (http by default). + + + Only http is supported at the moment. + + ' + displayName: Target Cloud Event Binding + path: target.cloudEvent.binding + - description: The URL used to send the CloudEvents to the endpoint. + displayName: Target Cloud Event Http Url + path: target.cloudEvent.http.url + - displayName: Target Cloud Event Http Headers + path: target.cloudEvent.http.headers + - description: 'Set the connect timeout. + + + Value 0 represents infinity (default). Negative values are not allowed. + + ' + displayName: Target Cloud Event Http Connect Timeout + path: target.cloudEvent.http.connectTimeout + - description: 'Set the read timeout. The value is the timeout to read a + response. + + + Value 0 represents infinity (default). Negative values are not allowed. + + ' + displayName: Target Cloud Event Http Read Timeout + path: target.cloudEvent.http.readTimeout + - description: 'Set the retry limit. When set the event will be sent again + after an error for the specified limit of times. When not set the event + will be sent again after an error. + + ' + displayName: Target Cloud Event Http Retry Limit + path: target.cloudEvent.http.retryLimit + - description: 'The maximum amount of delay in seconds after an error before + retrying again. + + + The initial delay will use 10% of this value and then increase the value + exponentially up to the maximum amount of seconds specified with this + field. + + ' + displayName: Target Cloud Event Http Retry Backoff Delay + path: target.cloudEvent.http.retryBackoffDelay + - description: When `true` disable hostname verification. + displayName: Target Cloud Event Http Skip Hostname Verification + path: target.cloudEvent.http.skipHostnameVerification + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: "The PgLambda script format (javascript by default).\n\n\ + * `javascript`: the script will receive the following variable:\n *\ + \ `request`: the HTTP request object. See https://nodejs.org/docs/latest-v18.x/api/http.html#class-httpclientrequest\n\ + \ * `response`: the HTTP response object. See https://nodejs.org/docs/latest-v18.x/api/http.html#class-httpserverresponse\n\ + \ * `event`: the CloudEvent event object. See https://github.com/cloudevents/sdk-javascript\n" + displayName: Target Pg Lambda Script Type + path: target.pgLambda.scriptType + - description: 'Script to execute. This field is mutually exclusive with + `scriptFrom` field. + + ' + displayName: Target Pg Lambda Script + path: target.pgLambda.script + - description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + displayName: Target Pg Lambda Script From Secret Key Ref Name + path: target.pgLambda.scriptFrom.secretKeyRef.name + - description: The key of the secret to select from. Must be a valid secret + key. 
+ displayName: Target Pg Lambda Script From Secret Key Ref Key + path: target.pgLambda.scriptFrom.secretKeyRef.key + - description: 'The name of the ConfigMap that contains the script to execute. + + ' + displayName: Target Pg Lambda Script From Config Map Key Ref Name + path: target.pgLambda.scriptFrom.configMapKeyRef.name + - description: 'The key name within the ConfigMap that contains the script + to execute. + + ' + displayName: Target Pg Lambda Script From Config Map Key Ref Key + path: target.pgLambda.scriptFrom.configMapKeyRef.key + - displayName: Target Pg Lambda Knative Annotations + path: target.pgLambda.knative.annotations + - displayName: Target Pg Lambda Knative Labels + path: target.pgLambda.knative.labels + - description: The URL used to send the CloudEvents to the endpoint. + displayName: Target Pg Lambda Knative Http Url + path: target.pgLambda.knative.http.url + - displayName: Target Pg Lambda Knative Http Headers + path: target.pgLambda.knative.http.headers + - description: 'Set the connect timeout. + + + Value 0 represents infinity (default). Negative values are not allowed. + + ' + displayName: Target Pg Lambda Knative Http Connect Timeout + path: target.pgLambda.knative.http.connectTimeout + - description: 'Set the read timeout. The value is the timeout to read a + response. + + + Value 0 represents infinity (default). Negative values are not allowed. + + ' + displayName: Target Pg Lambda Knative Http Read Timeout + path: target.pgLambda.knative.http.readTimeout + - description: 'Set the retry limit. When set the event will be sent again + after an error for the specified limit of times. When not set the event + will be sent again after an error. + + ' + displayName: Target Pg Lambda Knative Http Retry Limit + path: target.pgLambda.knative.http.retryLimit + - description: 'The maximum amount of delay in seconds after an error before + retrying again. + + + The initial delay will use 10% of this value and then increase the value + exponentially up to the maximum amount of seconds specified with this + field. + + ' + displayName: Target Pg Lambda Knative Http Retry Backoff Delay + path: target.pgLambda.knative.http.retryBackoffDelay + - description: When `true` disable hostname verification. + displayName: Target Pg Lambda Knative Http Skip Hostname Verification + path: target.pgLambda.knative.http.skipHostnameVerification + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The target SGCluster name. + + ' + displayName: Target SGCluster Name + path: target.sgCluster.name + - description: 'The target database name to which the data will be migrated + to. + + + If not specified the default postgres database will be targeted. + + ' + displayName: Target SGCluster Database + path: target.sgCluster.database + - description: 'The Secret name where the username is stored. + + ' + displayName: Target SGCluster Username Name + path: target.sgCluster.username.name + - description: 'The Secret key where the username is stored. + + ' + displayName: Target SGCluster Username Key + path: target.sgCluster.username.key + - description: 'The Secret name where the password is stored. + + ' + displayName: Target SGCluster Password Name + path: target.sgCluster.password.name + - description: 'The Secret key where the password is stored. + + ' + displayName: Target SGCluster Password Key + path: target.sgCluster.password.key + - description: 'When `true` disable import of DDL and tables will be created + on demand by Debezium. 
+ + ' + displayName: Target SGCluster Skip Ddl Import + path: target.sgCluster.skipDdlImport + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Allow to set a [SIMILAR TO regular expression](https://www.postgresql.org/docs/current/functions-matching.html#FUNCTIONS-SIMILARTO-REGEXP) + to match the names of the roles to skip during import of DDL. + + + When not set and source is an SGCluster will match the superuser, replicator + and authenticator usernames. + + ' + displayName: Target SGCluster Ddl Import Role Skip Filter + path: target.sgCluster.ddlImportRoleSkipFilter + - description: 'Default `5`. Specifies the minimum number of connections + in the pool. + + ' + displayName: Target SGCluster Debezium Properties Connection Pool Min_size + path: target.sgCluster.debeziumProperties.connectionPoolMin_size + - description: 'Default `32`. Specifies the maximum number of concurrent + connections that the pool maintains. + + ' + displayName: Target SGCluster Debezium Properties Connection Pool Max_size + path: target.sgCluster.debeziumProperties.connectionPoolMax_size + - description: 'Default `32`. Specifies the number of connections that the + connector attempts to acquire if the connection pool exceeds its maximum + size. + + ' + displayName: Target SGCluster Debezium Properties Connection Pool Acquire_increment + path: target.sgCluster.debeziumProperties.connectionPoolAcquire_increment + - description: 'Default `1800`. Specifies the number of seconds that an + unused connection is kept before it is discarded. + + ' + displayName: Target SGCluster Debezium Properties Connection Pool Timeout + path: target.sgCluster.debeziumProperties.connectionPoolTimeout + - description: 'Default `UTC`. Specifies the timezone used when inserting + JDBC temporal values. + + ' + displayName: Target SGCluster Debezium Properties Database Time_zone + path: target.sgCluster.debeziumProperties.databaseTime_zone + - description: 'Default `true`. Specifies whether the connector processes + DELETE or tombstone events and removes the corresponding row from the + database. Use of this option requires that you set the `primaryKeyMode` + to `record_key`. + + ' + displayName: Target SGCluster Debezium Properties Delete Enabled + path: target.sgCluster.debeziumProperties.deleteEnabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `true`. Specifies whether the connector processes + TRUNCATE events and truncates the corresponding tables from the database. + + Although support for TRUNCATE statements has been available in Db2 since + version 9.7, currently, the JDBC connector is unable to process standard + TRUNCATE events that the Db2 connector emits. + + To ensure that the JDBC connector can process TRUNCATE events received + from Db2, perform the truncation by using an alternative to the standard + TRUNCATE TABLE statement. For example: + + + ``` + + ALTER TABLE ACTIVATE NOT LOGGED INITIALLY WITH EMPTY TABLE + + ``` + + + The user account that submits the preceding query requires ALTER privileges + on the table to be truncated. + + ' + displayName: Target SGCluster Debezium Properties Truncate Enabled + path: target.sgCluster.debeziumProperties.truncateEnabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `upsert`. Specifies the strategy used to insert + events into the database. 
The following options are available: + + * `insert`: Specifies that all events should construct INSERT-based + SQL statements. Use this option only when no primary key is used, or + when you can be certain that no updates can occur to rows with existing + primary key values. + + * `update`: Specifies that all events should construct UPDATE-based + SQL statements. Use this option only when you can be certain that the + connector receives only events that apply to existing rows. + + * `upsert`: Specifies that the connector adds events to the table using + upsert semantics. That is, if the primary key does not exist, the connector + performs an INSERT operation, and if the key does exist, the connector + performs an UPDATE operation. When idempotent writes are required, the + connector should be configured to use this option. + + ' + displayName: Target SGCluster Debezium Properties Insert Mode + path: target.sgCluster.debeziumProperties.insertMode + - description: 'Default `record_key`. Specifies how the connector resolves + the primary key columns from the event. + + * `none`: Specifies that no primary key columns are created. + + * `record_key`: Specifies that the primary key columns are sourced from + the event’s record key. If the record key is a primitive type, the `primaryKeyFields` + property is required to specify the name of the primary key column. + If the record key is a struct type, the `primaryKeyFields` property + is optional, and can be used to specify a subset of columns from the + event’s key as the table’s primary key. + + * `record_value`: Specifies that the primary key columns are sourced + from the event’s value. You can set the `primaryKeyFields` property + to define the primary key as a subset of fields from the event’s value; + otherwise all fields are used by default. + + ' + displayName: Target SGCluster Debezium Properties Primary Key Mode + path: target.sgCluster.debeziumProperties.primaryKeyMode + - displayName: Target SGCluster Debezium Properties Primary Key Fields + path: target.sgCluster.debeziumProperties.primaryKeyFields + - description: 'Default `true`. Specifies whether generated SQL statements + use quotation marks to delimit table and column names. See the Quoting + and case sensitivity section for more details. + + ' + displayName: Target SGCluster Debezium Properties Quote Identifiers + path: target.sgCluster.debeziumProperties.quoteIdentifiers + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `basic`. Specifies how the connector evolves the + destination table schemas. For more information, see Schema evolution. + The following options are available: + + `none`: Specifies that the connector does not evolve the destination + schema. + + `basic`: Specifies that basic evolution occurs. The connector adds missing + columns to the table by comparing the incoming event’s record schema + to the database table structure. + + ' + displayName: Target SGCluster Debezium Properties Schema Evolution + path: target.sgCluster.debeziumProperties.schemaEvolution + - description: 'Default `${original}`. Specifies a string that determines + how the destination table name is formatted, based on the topic name + of the event. The placeholder ${original} is replaced with the schema + name and the table name separated by a point character (`.`). + + ' + displayName: Target SGCluster Debezium Properties Table Name Format + path: target.sgCluster.debeziumProperties.tableNameFormat + - description: 'Default `public`. 
Specifies the schema name where the PostgreSQL + PostGIS extension is installed. The default is `public`; however, if + the PostGIS extension was installed in another schema, this property + should be used to specify the alternate schema name. + + ' + displayName: Target SGCluster Debezium Properties Dialect Postgres Postgis + Schema + path: target.sgCluster.debeziumProperties.dialectPostgresPostgisSchema + - description: 'Default `false`. Specifies whether the connector automatically + sets an IDENTITY_INSERT before an INSERT or UPSERT operation into the + identity column of SQL Server tables, and then unsets it immediately + after the operation. When the default setting (`false`) is in effect, + an INSERT or UPSERT operation into the IDENTITY column of a table results + in a SQL exception. + + ' + displayName: Target SGCluster Debezium Properties Dialect Sqlserver Identity + Insert + path: target.sgCluster.debeziumProperties.dialectSqlserverIdentityInsert + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Default `500`. Specifies how many records to attempt to + batch together into the destination table. + + > Note that if you set `consumerMaxPollRecords` in the Connect worker + properties to a value lower than `batchSize`, batch processing will + be capped by `consumerMaxPollRecords` and the desired `batchSize` won’t + be reached. You can also configure the connector’s underlying consumer’s + `maxPollRecords` using `consumerOverrideMaxPollRecords` in the connector + configuration. + + ' + displayName: Target SGCluster Debezium Properties Batch Size + path: target.sgCluster.debeziumProperties.batchSize + - description: 'Default `io.debezium.connector.jdbc.naming.DefaultColumnNamingStrategy`. + Specifies the fully-qualified class name of a ColumnNamingStrategy implementation + that the connector uses to resolve column names from event field names. + + By default, the connector uses the field name as the column name. + + ' + displayName: Target SGCluster Debezium Properties Column Naming Strategy + path: target.sgCluster.debeziumProperties.columnNamingStrategy + - description: 'Default `io.stackgres.stream.jobs.migration.StreamMigrationTableNamingStrategy`. + Specifies the fully-qualified class name of a TableNamingStrategy implementation + that the connector uses to resolve table names from incoming event topic + names. + + The default behavior is to: + + * Replace the ${topic} placeholder in the `tableNameFormat` configuration + property with the event’s topic. + + * Sanitize the table name by replacing dots (`.`) with underscores (`_`). + + ' + displayName: Target SGCluster Debezium Properties Table Naming Strategy + path: target.sgCluster.debeziumProperties.tableNamingStrategy + - description: 'The maximum number of retries the streaming operation is + allowed to do after a failure. + + + A value of `0` (zero) means no retries are made. A value of `-1` means + retries are unlimited. Defaults to: `-1`. + + ' + displayName: Max Retries + path: maxRetries + - description: 'Size of the PersistentVolume for stream Pod. This size is + specified either in Mebibytes, Gibibytes or Tebibytes (multiples of + 2^20, 2^30 or 2^40, respectively). + + ' + displayName: Pods Persistent Volume Size + path: pods.persistentVolume.size + - description: 'Name of an existing StorageClass in the Kubernetes cluster, + used to create the PersistentVolume for stream. 
+ + ' + displayName: Pods Persistent Volume Storage Class + path: pods.persistentVolume.storageClass + - description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource available + inside a container. + displayName: Pods Resources Claims Name + path: pods.resources.claims.name + - description: "Quantity is a fixed-point representation of a number. It\ + \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\ + \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\ + \n``` ::= \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::= | \ + \ ::= | . | . | .\ + \ ::= \"+\" | \"-\" ::= \ + \ | ::= | \ + \ | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in magnitude, nor may it\ + \ have more than 3 decimal places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This\ + \ may be extended in the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will remember the type\ + \ of suffix it had, and will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Pods Resources Limits + path: pods.resources.limits + - description: "Quantity is a fixed-point representation of a number. It\ + \ provides convenient marshaling/unmarshaling in JSON and YAML, in addition\ + \ to String() and AsInt64() accessors.\n\nThe serialization format is:\n\ + \n``` ::= \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::= | \ + \ ::= | . | . 
| .\ + \ ::= \"+\" | \"-\" ::= \ + \ | ::= | \ + \ | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in magnitude, nor may it\ + \ have more than 3 decimal places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This\ + \ may be extended in the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will remember the type\ + \ of suffix it had, and will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n- No precision is\ + \ lost - No fractional digits will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented by a floating point number. That\ + \ is the whole point of this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed, but will be re-emitted\ + \ in their canonical form. (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult to use these numbers\ + \ without writing some sort of special handling code in the hopes that\ + \ that will cause implementors to also use a fixed point implementation." + displayName: Pods Resources Requests + path: pods.resources.requests + - displayName: Pods Scheduling Node Selector + path: pods.scheduling.nodeSelector + - description: Effect indicates the taint effect to match. Empty means match + all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + displayName: Pods Scheduling Tolerations Effect + path: pods.scheduling.tolerations.effect + - description: Key is the taint key that the toleration applies to. Empty + means match all taint keys. If the key is empty, operator must be Exists; + this combination means to match all values and all keys. + displayName: Pods Scheduling Tolerations Key + path: pods.scheduling.tolerations.key + - description: Operator represents a key's relationship to the value. Valid + operators are Exists and Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate all taints of a particular + category. + displayName: Pods Scheduling Tolerations Operator + path: pods.scheduling.tolerations.operator + - description: TolerationSeconds represents the period of time the toleration + (which must be of effect NoExecute, otherwise this field is ignored) + tolerates the taint. By default, it is not set, which means tolerate + the taint forever (do not evict). Zero and negative values will be treated + as 0 (evict immediately) by the system. + displayName: Pods Scheduling Tolerations Toleration Seconds + path: pods.scheduling.tolerations.tolerationSeconds + - description: Value is the taint value the toleration matches to. 
If the + operator is Exists, the value should be empty, otherwise just a regular + string. + displayName: Pods Scheduling Tolerations Value + path: pods.scheduling.tolerations.value + - description: The label key that the selector applies to. + displayName: Pods Scheduling Node Affinity Preferred During Scheduling + Ignored During Execution Preference Match Expressions Key + path: pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Pods Scheduling Node Affinity Preferred During Scheduling + Ignored During Execution Preference Match Expressions Operator + path: pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.operator + - displayName: Pods Scheduling Node Affinity Preferred During Scheduling + Ignored During Execution Preference Match Expressions Values + path: pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions.values + - description: The label key that the selector applies to. + displayName: Pods Scheduling Node Affinity Preferred During Scheduling + Ignored During Execution Preference Match Fields Key + path: pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Pods Scheduling Node Affinity Preferred During Scheduling + Ignored During Execution Preference Match Fields Operator + path: pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.operator + - displayName: Pods Scheduling Node Affinity Preferred During Scheduling + Ignored During Execution Preference Match Fields Values + path: pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields.values + - description: Weight associated with matching the corresponding nodeSelectorTerm, + in the range 1-100. + displayName: Pods Scheduling Node Affinity Preferred During Scheduling + Ignored During Execution Weight + path: pods.scheduling.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: The label key that the selector applies to. + displayName: Pods Scheduling Node Affinity Required During Scheduling + Ignored During Execution Node Selector Terms Match Expressions Key + path: pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Pods Scheduling Node Affinity Required During Scheduling + Ignored During Execution Node Selector Terms Match Expressions Operator + path: pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.operator + - displayName: Pods Scheduling Node Affinity Required During Scheduling + Ignored During Execution Node Selector Terms Match Expressions Values + path: pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions.values + - description: The label key that the selector applies to. 
+ displayName: Pods Scheduling Node Affinity Required During Scheduling + Ignored During Execution Node Selector Terms Match Fields Key + path: pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.key + - description: Represents a key's relationship to a set of values. Valid + operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + displayName: Pods Scheduling Node Affinity Required During Scheduling + Ignored During Execution Node Selector Terms Match Fields Operator + path: pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.operator + - displayName: Pods Scheduling Node Affinity Required During Scheduling + Ignored During Execution Node Selector Terms Match Fields Values + path: pods.scheduling.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields.values + - description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which indicate + the highest priorities with the former being the highest priority. Any + other name must be defined by creating a PriorityClass object with that + name. If not specified, the pod priority will be default or zero if + there is no default. + displayName: Pods Scheduling Priority Class Name + path: pods.scheduling.priorityClassName + - description: key is the label key that the selector applies to. + displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Key + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Operator + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Values + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Labels + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Match Label Keys + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Mismatch Label Keys + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. 
+ displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Key + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Operator + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Values + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Labels + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespaces + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Topology Key + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Pods Scheduling Pod Affinity Preferred During Scheduling + Ignored During Execution Weight + path: pods.scheduling.podAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Key + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Operator + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Expressions Values + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Label Selector Match Labels + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Match Label Keys + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Mismatch Label Keys + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Key + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Operator + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Expressions Values + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespace Selector Match Labels + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Namespaces + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Pods Scheduling Pod Affinity Required During Scheduling Ignored + During Execution Topology Key + path: pods.scheduling.podAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: key is the label key that the selector applies to. 
+ displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Key + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Operator + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.operator + - displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Expressions + Values + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchExpressions.values + - displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Label Selector Match Labels + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector.matchLabels + - displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Match Label Keys + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.matchLabelKeys + - displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Mismatch Label Keys + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Key + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Operator + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.operator + - displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Expressions Values + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchExpressions.values + - displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespace Selector Match + Labels + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaceSelector.matchLabels + - displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Namespaces + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Pod Affinity Term Topology Key + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey + - description: weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + displayName: Pods Scheduling Pod Anti Affinity Preferred During Scheduling + Ignored During Execution Weight + path: pods.scheduling.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.weight + - description: key is the label key that the selector applies to. + displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Label Selector Match Expressions Key + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Label Selector Match Expressions Operator + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.operator + - displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Label Selector Match Expressions Values + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchExpressions.values + - displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Label Selector Match Labels + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.labelSelector.matchLabels + - displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Match Label Keys + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.matchLabelKeys + - displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Mismatch Label Keys + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.mismatchLabelKeys + - description: key is the label key that the selector applies to. + displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Namespace Selector Match Expressions Key + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Namespace Selector Match Expressions Operator + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.operator + - displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Namespace Selector Match Expressions Values + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchExpressions.values + - displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Namespace Selector Match Labels + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaceSelector.matchLabels + - displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Namespaces + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.namespaces + - description: This pod should be co-located (affinity) or not co-located + (anti-affinity) with the pods matching the labelSelector in the specified + namespaces, where co-located is defined as running on a node whose value + of the label with key topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey is not allowed. + displayName: Pods Scheduling Pod Anti Affinity Required During Scheduling + Ignored During Execution Topology Key + path: pods.scheduling.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution.topologyKey + - description: key is the label key that the selector applies to. 
+ displayName: Pods Scheduling Topology Spread Constraints Label Selector + Match Expressions Key + path: pods.scheduling.topologySpreadConstraints.labelSelector.matchExpressions.key + - description: operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + displayName: Pods Scheduling Topology Spread Constraints Label Selector + Match Expressions Operator + path: pods.scheduling.topologySpreadConstraints.labelSelector.matchExpressions.operator + - displayName: Pods Scheduling Topology Spread Constraints Label Selector + Match Expressions Values + path: pods.scheduling.topologySpreadConstraints.labelSelector.matchExpressions.values + - displayName: Pods Scheduling Topology Spread Constraints Label Selector + Match Labels + path: pods.scheduling.topologySpreadConstraints.labelSelector.matchLabels + - displayName: Pods Scheduling Topology Spread Constraints Match Label Keys + path: pods.scheduling.topologySpreadConstraints.matchLabelKeys + - description: 'MaxSkew describes the degree to which pods may be unevenly + distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum + permitted difference between the number of matching pods in the target + topology and the global minimum. The global minimum is the minimum number + of matching pods in an eligible domain or zero if the number of eligible + domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector spread as 2/2/1: In + this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P + P | P | - if MaxSkew is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the + ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is + 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy it. + It''s a required field. Default value is 1 and 0 is not allowed.' + displayName: Pods Scheduling Topology Spread Constraints Max Skew + path: pods.scheduling.topologySpreadConstraints.maxSkew + - description: 'MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less + than minDomains, Pod Topology Spread treats "global minimum" as 0, and + then the calculation of Skew is performed. And when the number of eligible + domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. As a result, when the number + of eligible domains is less than minDomains, scheduler won''t schedule + more than maxSkew Pods to those domains. If value is nil, the constraint + behaves as if MinDomains is equal to 1. Valid values are integers greater + than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is + set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 + | zone2 | zone3 | | P P | P P | P P | The number of domains is + less than 5(MinDomains), so "global minimum" is treated as 0. In this + situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any + of the three zones, it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread + feature gate to be enabled (enabled by default).' 
+ displayName: Pods Scheduling Topology Spread Constraints Min Domains + path: pods.scheduling.topologySpreadConstraints.minDomains + - description: 'NodeAffinityPolicy indicates how we will treat Pod''s nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: - Honor: only + nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included + in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + displayName: Pods Scheduling Topology Spread Constraints Node Affinity + Policy + path: pods.scheduling.topologySpreadConstraints.nodeAffinityPolicy + - description: 'NodeTaintsPolicy indicates how we will treat node taints + when calculating pod topology spread skew. Options are: - Honor: nodes + without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. - Ignore: node taints are ignored. All + nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + displayName: Pods Scheduling Topology Spread Constraints Node Taints Policy + path: pods.scheduling.topologySpreadConstraints.nodeTaintsPolicy + - description: TopologyKey is the key of node labels. Nodes that have a + label with this key and identical values are considered to be in the + same topology. We consider each as a "bucket", and try + to put balanced number of pods into each bucket. We define a domain + as a particular instance of a topology. Also, we define an eligible + domain as a domain whose nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", + each zone is a domain of that topology. It's a required field. + displayName: Pods Scheduling Topology Spread Constraints Topology Key + path: pods.scheduling.topologySpreadConstraints.topologyKey + - description: "WhenUnsatisfiable indicates how to deal with a pod if it\ + \ doesn't satisfy the spread constraint. - DoNotSchedule (default) tells\ + \ the scheduler not to schedule it. - ScheduleAnyway tells the scheduler\ + \ to schedule the pod in any location,\n but giving higher precedence\ + \ to topologies that would help reduce the\n skew.\nA constraint is\ + \ considered \"Unsatisfiable\" for an incoming pod if and only if every\ + \ possible node assignment for that pod would violate \"MaxSkew\" on\ + \ some topology. For example, in a 3-zone cluster, MaxSkew is set to\ + \ 1, and pods with the same labelSelector spread as 3/1/1: | zone1 |\ + \ zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is\ + \ set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3)\ + \ to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies\ + \ MaxSkew(1). In other words, the cluster can still be imbalanced, but\ + \ scheduler won't make it *more* imbalanced. It's a required field." + displayName: Pods Scheduling Topology Spread Constraints When Unsatisfiable + path: pods.scheduling.topologySpreadConstraints.whenUnsatisfiable + - description: 'Default `io.debezium.engine.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy`. 
+ The name of the Java class of the commit policy. It defines when offsets + commit has to be triggered based on the number of events processed and + the time elapsed since the last commit. This class must implement the + interface OffsetCommitPolicy. The default is a periodic commit policy + based upon time intervals. + + ' + displayName: Debezium Engine Properties Offset Commit Policy + path: debeziumEngineProperties.offsetCommitPolicy + - description: 'Default `60000`. Interval at which to try committing offsets. + The default is 1 minute. + + ' + displayName: Debezium Engine Properties Offset Flush Interval Ms + path: debeziumEngineProperties.offsetFlushIntervalMs + - description: 'Default `5000`. Maximum number of milliseconds to wait for + records to flush and partition offset data to be committed to offset + storage before cancelling the process and restoring the offset data + to be committed in a future attempt. The default is 5 seconds. + + ' + displayName: Debezium Engine Properties Offset Flush Timeout Ms + path: debeziumEngineProperties.offsetFlushTimeoutMs + - description: 'Default `-1`. The maximum number of retries on connection + errors before failing (-1 = no limit, 0 = disabled, > 0 = num of retries). + + ' + displayName: Debezium Engine Properties Errors Max Retries + path: debeziumEngineProperties.errorsMaxRetries + - description: 'Default `300`. Initial delay (in ms) for retries when encountering + connection errors. This value will be doubled upon every retry but won’t + exceed errorsRetryDelayMaxMs. + + ' + displayName: Debezium Engine Properties Errors Retry Delay Initial Ms + path: debeziumEngineProperties.errorsRetryDelayInitialMs + - description: 'Default `10000`. Max delay (in ms) between retries when + encountering connection errors. + + ' + displayName: Debezium Engine Properties Errors Retry Delay Max Ms + path: debeziumEngineProperties.errorsRetryDelayMaxMs + - displayName: Debezium Engine Properties Transforms + path: debeziumEngineProperties.transforms + - displayName: Debezium Engine Properties Predicates + path: debeziumEngineProperties.predicates + statusDescriptors: + - description: Last time the condition transitioned from one status to another. + displayName: Conditions Last Transition Time + path: conditions.lastTransitionTime + - description: A human-readable message indicating details about the transition. + displayName: Conditions Message + path: conditions.message + - description: The reason for the condition's last transition. + displayName: Conditions Reason + path: conditions.reason + - description: Status of the condition, one of `True`, `False` or `Unknown`. + displayName: Conditions Status + path: conditions.status + - description: Type of deployment condition. + displayName: Conditions Type + path: conditions.type + - description: 'The last snapshot event that the connector has read. + + ' + displayName: Snapshot Last Event + path: snapshot.lastEvent + - description: 'The number of milliseconds since the connector has read + and processed the most recent event. + + ' + displayName: Snapshot Milli Seconds Since Last Event + path: snapshot.milliSecondsSinceLastEvent + - description: 'The total number of events that this connector has seen + since last started or reset. + + ' + displayName: Snapshot Total Number Of Events Seen + path: snapshot.totalNumberOfEventsSeen + - description: 'The number of events that have been filtered by include/exclude + list filtering rules configured on the connector. 
+ + ' + displayName: Snapshot Number Of Events Filtered + path: snapshot.numberOfEventsFiltered + - displayName: Snapshot Captured Tables + path: snapshot.capturedTables + - description: 'The length the queue used to pass events between the snapshotter + and the main Kafka Connect loop. + + ' + displayName: Snapshot Queue Total Capacity + path: snapshot.queueTotalCapacity + - description: 'The free capacity of the queue used to pass events between + the snapshotter and the main Kafka Connect loop. + + ' + displayName: Snapshot Queue Remaining Capacity + path: snapshot.queueRemainingCapacity + - description: 'The total number of tables that are being included in the + snapshot. + + ' + displayName: Snapshot Total Table Count + path: snapshot.totalTableCount + - description: 'The number of tables that the snapshot has yet to copy. + + ' + displayName: Snapshot Remaining Table Count + path: snapshot.remainingTableCount + - description: 'Whether the snapshot was started. + + ' + displayName: Snapshot Snapshot Running + path: snapshot.snapshotRunning + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Whether the snapshot was paused. + + ' + displayName: Snapshot Snapshot Paused + path: snapshot.snapshotPaused + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Whether the snapshot was aborted. + + ' + displayName: Snapshot Snapshot Aborted + path: snapshot.snapshotAborted + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'Whether the snapshot completed. + + ' + displayName: Snapshot Snapshot Completed + path: snapshot.snapshotCompleted + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The total number of seconds that the snapshot has taken + so far, even if not complete. Includes also time when snapshot was paused. + + ' + displayName: Snapshot Snapshot Duration In Seconds + path: snapshot.snapshotDurationInSeconds + - description: 'The total number of seconds that the snapshot was paused. + If the snapshot was paused several times, the paused time adds up. + + ' + displayName: Snapshot Snapshot Paused Duration In Seconds + path: snapshot.snapshotPausedDurationInSeconds + - displayName: Snapshot Rows Scanned + path: snapshot.rowsScanned + - description: 'The maximum buffer of the queue in bytes. This metric is + available if max.queue.size.in.bytes is set to a positive long value. + + ' + displayName: Snapshot Max Queue Size In Bytes + path: snapshot.maxQueueSizeInBytes + - description: 'The current volume, in bytes, of records in the queue. + + ' + displayName: Snapshot Current Queue Size In Bytes + path: snapshot.currentQueueSizeInBytes + - description: 'The identifier of the current snapshot chunk. + + ' + displayName: Snapshot Chunk Id + path: snapshot.chunkId + - description: 'The lower bound of the primary key set defining the current + chunk. + + ' + displayName: Snapshot Chunk From + path: snapshot.chunkFrom + - description: 'The upper bound of the primary key set defining the current + chunk. + + ' + displayName: Snapshot Chunk To + path: snapshot.chunkTo + - description: 'The lower bound of the primary key set of the currently + snapshotted table. + + ' + displayName: Snapshot Table From + path: snapshot.tableFrom + - description: 'The upper bound of the primary key set of the currently + snapshotted table. 
+ + ' + displayName: Snapshot Table To + path: snapshot.tableTo + - description: 'The last streaming event that the connector has read. + + ' + displayName: Streaming Last Event + path: streaming.lastEvent + - description: 'The number of milliseconds since the connector has read + and processed the most recent event. + + ' + displayName: Streaming Milli Seconds Since Last Event + path: streaming.milliSecondsSinceLastEvent + - description: 'The total number of events that this connector has seen + since the last start or metrics reset. + + ' + displayName: Streaming Total Number Of Events Seen + path: streaming.totalNumberOfEventsSeen + - description: 'The total number of create events that this connector has + seen since the last start or metrics reset. + + ' + displayName: Streaming Total Number Of Create Events Seen + path: streaming.totalNumberOfCreateEventsSeen + - description: 'The total number of update events that this connector has + seen since the last start or metrics reset. + + ' + displayName: Streaming Total Number Of Update Events Seen + path: streaming.totalNumberOfUpdateEventsSeen + - description: 'The total number of delete events that this connector has + seen since the last start or metrics reset. + + ' + displayName: Streaming Total Number Of Delete Events Seen + path: streaming.totalNumberOfDeleteEventsSeen + - description: 'The number of events that have been filtered by include/exclude + list filtering rules configured on the connector. + + ' + displayName: Streaming Number Of Events Filtered + path: streaming.numberOfEventsFiltered + - displayName: Streaming Captured Tables + path: streaming.capturedTables + - description: 'The length the queue used to pass events between the streamer + and the main Kafka Connect loop. + + ' + displayName: Streaming Queue Total Capacity + path: streaming.queueTotalCapacity + - description: 'The free capacity of the queue used to pass events between + the streamer and the main Kafka Connect loop. + + ' + displayName: Streaming Queue Remaining Capacity + path: streaming.queueRemainingCapacity + - description: 'Flag that denotes whether the connector is currently connected + to the database server. + + ' + displayName: Streaming Connected + path: streaming.connected + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The number of milliseconds between the last change event’s + timestamp and the connector processing it. The values will incoporate + any differences between the clocks on the machines where the database + server and the connector are running. + + ' + displayName: Streaming Milli Seconds Behind Source + path: streaming.milliSecondsBehindSource + - description: 'The number of processed transactions that were committed. + + ' + displayName: Streaming Number Of Committed Transactions + path: streaming.numberOfCommittedTransactions + - displayName: Streaming Source Event Position + path: streaming.sourceEventPosition + - description: 'Transaction identifier of the last processed transaction. + + ' + displayName: Streaming Last Transaction Id + path: streaming.lastTransactionId + - description: 'The maximum buffer of the queue in bytes. This metric is + available if max.queue.size.in.bytes is set to a positive long value. + + ' + displayName: Streaming Max Queue Size In Bytes + path: streaming.maxQueueSizeInBytes + - description: 'The current volume, in bytes, of records in the queue. 
+ + ' + displayName: Streaming Current Queue Size In Bytes + path: streaming.currentQueueSizeInBytes + - description: 'It is true if the last event that the stream has tried to + send since the last start or metrics reset was sent successfully. + + ' + displayName: Events Last Event Was Sent + path: events.lastEventWasSent + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: 'The last event that the stream has sent since the last start + or metrics reset. + + ' + displayName: Events Last Event Sent + path: events.lastEventSent + - description: 'The total number of events that this stream has sent since + the last start or metrics reset. + + ' + displayName: Events Total Number Of Events Sent + path: events.totalNumberOfEventsSent + - description: 'The last error seen sending events that this stream has + seen since the last start or metrics reset. + + ' + displayName: Events Last Error Seen + path: events.lastErrorSeen + - description: 'The total number of errors sending events that this stream + has seen since the last start or metrics reset. + + ' + displayName: Events Total Number Of Errors Seen + path: events.totalNumberOfErrorsSeen + - description: The failure message + displayName: Failure + path: failure + version: v1alpha1 + description: ' + + [StackGres](https://stackgres.io) is the **Stack** required for enterprise production + Post**Gres**. A fully-featured platform to run Postgres on Kubernetes. Fully Open + Source, StackGres supports both a declarative approach suitable for GitOps workflows + and a complete Web Console for the best user experience. + + + Built by [OnGres](https://ongres.com) ("**On** Post**Gres**"), StackGres requires + little to no prior Postgres experience. StackGres can perform fully automated + deployments, fully automated database operations ("Day 2 operations"), and comes + with advanced database tuning by default. Yet it remains highly customizable for + Postgres expert DBAs. + + + [StackGres features](https://stackgres.io/features/) include, among others: + + + * **High Availability with automated failover**. StackGres relies on [Patroni](https://github.com/zalando/patroni), + and it is built-in and fully automatic. + + * **Integrated connection pooling**. Built-in, by default, like it should be for + production workloads. + + * **Automatic backups with lifecycle policies**. Backup your clusters automatically + to any object store. Apply retention policies. Restoration supports PITR. + + * **Advanced replication modes**, including async, sync and group replication. + It also supports cascading replication and standby clusters on separate Kubernetes + clusters for disaster recovery. + + * **More than 150 Postgres extensions**. The Postgres platform with [the largest + number of extensions in the world](https://stackgres.io/extensions/). With new + extensions added continuously. + + * **Observability**. Fully integrated with the Prometheus stack. Includes pre-defined, + Postgres-specific dashboards and alerts. + + * **Fully-featured Web Console**. Perform any operation from the Web Console. + Supports SSO, fine-grained RBAC and a REST API. + + * **Distributed Logs**. StackGres developed a mechanism to ship logs from all + pods to a central log server, managed by StackGres, that stores logs in Postgres. + Query your logs with SQL or from the Web Console! + + * **Automated Day 2 Operations**. Minor and major version upgrades, container + upgrades, controlled restart, vacuum, repack, even benchmarks! 
+ + * **Expertly tuned by default**. From the creators of [CONF](https://postgresqlco.nf), + StackGres pre-tunes your Postgres servers with more than 40 parameters tuned by + default. + + * **100% Open Source**. No "premium version with advanced features", no production + usage restrictions. Just Open Source. + + * **[24/7 Support](https://stackgres.io/pricing/) Available from OnGres** + + + ## Installation and documentation + + + Installation: + + * For a quick test, you can follow our [quickstart](https://stackgres.io/doc/latest/demo/quickstart/). + + * [Production installations](https://stackgres.io/doc/latest/install/). + + + All the documentation is available at [stackgres.io/doc](https://stackgres.io/doc/latest/install/). + + + Join the [Slack](https://slack.stackgres.io) and/or [Discord](https://discord.stackgres.io) + Public Communities for Community support. + + ' + displayName: StackGres + icon: + - base64data: 'PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCA1MDAg + + NTAwIiBzdHlsZT0iZW5hYmxlLWJhY2tncm91bmQ6bmV3IDAgMCA1MDAgNTAwIiB4bWw6c3BhY2U9 + + InByZXNlcnZlIj4KICAgIDxzd2l0Y2g+CiAgICAgICAgPGc+CiAgICAgICAgICAgIDxwYXRoIHN0 + + eWxlPSJmaWxsOiM0MmE4YzgiIGQ9Im01LjIgMjczLjcgMTEwLjcgMTI2LjhoMjY4LjJsMTEwLjct + + MTI2LjgtMTEwLjctMTI2LjhIMTE1Ljl6Ii8+CiAgICAgICAgICAgIDxwYXRoIHN0eWxlPSJmaWxs + + OiM0MjZkODgiIGQ9Ik0xMTUuOSA0MDAuNWgyNjguNHY1Ni40SDExNS45eiIvPgogICAgICAgICAg + + ICA8cGF0aCBzdHlsZT0iZmlsbDojNDI4YmI0IiBkPSJNMTE1LjkgNDU2LjggNS4yIDMzMHYtNTYu + + M2wxMTAuNyAxMjYuOHoiLz4KICAgICAgICAgICAgPHBhdGggc3R5bGU9ImZpbGw6IzE2NjU3YyIg + + ZD0iTTM4NC4xIDQ1Ni44IDQ5NC44IDMzMHYtNTYuM0wzODQuMSA0MDAuNXoiLz4KICAgICAgICAg + + ICAgPHBhdGggZD0iTTQ2NS43IDI1My40YzAtNDctOTYuNi04NS4yLTIxNS43LTg1LjJTMzQuNCAy + + MDYuMyAzNC40IDI1My40czk2LjUgODUuMiAyMTUuNiA4NS4yIDIxNS43LTM4LjIgMjE1LjctODUu + + MnoiIHN0eWxlPSJmaWxsOiMzOWI1NGEiLz4KICAgICAgICAgICAgPHBhdGggZD0iTTQ2NS43IDI1 + + My40YzAgNDctOTYuNiA4NS4yLTIxNS43IDg1LjJTMzQuNCAzMDAuNCAzNC40IDI1My40djQ2Ljlj + + MTQuOSA0MS4zIDEwNi41IDg1LjIgMjE1LjYgODUuMnMyMDAuOC00My45IDIxNS43LTg1LjJ2LTQ2 + + Ljl6IiBzdHlsZT0iZmlsbDojMDA5MjQ1Ii8+CiAgICAgICAgICAgIDxwYXRoIHN0eWxlPSJmaWxs + + OiNmMmM2M2YiIGQ9Ik0xNi4zIDE3OC42IDI1MCAzMTQuMWwyMzMuOC0xMzUuNUwyNTAgNDMuMnoi + + Lz4KICAgICAgICAgICAgPHBhdGggc3R5bGU9ImZpbGw6I2YyYjEzNiIgZD0iTTE2LjMgMTc4LjZ2 + + NTIuOEwyNTAgMzY2Ljl2LTUyLjh6Ii8+CiAgICAgICAgICAgIDxwYXRoIHN0eWxlPSJmaWxsOiNm + + MmExMzAiIGQ9Ik00ODMuOCAxNzguNiAyNTAgMzE0LjF2NTIuOGwyMzMuOC0xMzUuNXoiLz4KICAg + + ICAgICAgICAgPHBhdGggc3R5bGU9ImZpbGw6I2ZmNzEyNCIgZD0ibTY4IDIxMi40IDM2NC4xLTUz + + LTkyLjQtMTA2eiIvPgogICAgICAgICAgICA8cGF0aCBzdHlsZT0iZmlsbDojZDkzZDFiIiBkPSJt + + NjggMjEyLjQgMzY0LjEtNTN2NDcuOEw2OCAyNjAuMXoiLz4KICAgICAgICA8L2c+CiAgICA8L3N3 + + aXRjaD4KPC9zdmc+Cg== + + ' + mediatype: image/svg+xml + install: + spec: + clusterPermissions: + - rules: + - apiGroups: + - '' + resources: + - namespaces + verbs: + - get + - list + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - apiGroups: + - apiextensions.k8s.io + resourceNames: + - sgconfigs.stackgres.io + - sgclusters.stackgres.io + - sginstanceprofiles.stackgres.io + - sgpgconfigs.stackgres.io + - sgpoolconfigs.stackgres.io + - sgbackups.stackgres.io + - sgbackupconfigs.stackgres.io + - sgobjectstorages.stackgres.io + - sgdbops.stackgres.io + - sgdistributedlogs.stackgres.io + - sgshardedclusters.stackgres.io + - sgshardedbackups.stackgres.io + - sgshardeddbops.stackgres.io + - sgscripts.stackgres.io + - sgstreams.stackgres.io + resources: + - customresourcedefinitions + 
verbs: + - get + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - apiGroups: + - apiextensions.k8s.io + resourceNames: + - prometheuses.monitoring.coreos.com + resources: + - customresourcedefinitions + verbs: + - get + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - create + - watch + - list + - get + - update + - patch + - delete + - apiGroups: + - '' + resources: + - users + - groups + verbs: + - impersonate + - apiGroups: + - operators.coreos.com + resources: + - operators + verbs: + - list + - get + serviceAccountName: stackgres-operator + - rules: + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + - apiGroups: + - '' + resources: + - users + - groups + verbs: + - impersonate + serviceAccountName: stackgres-restapi + deployments: + - label: + app: stackgres-operator + app.kubernetes.io/component: operator + app.kubernetes.io/created-by: stackgres + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: stackgres + group: stackgres.io + name: stackgres-operator + spec: + replicas: 1 + selector: + matchLabels: + app: stackgres-operator + group: stackgres.io + strategy: {} + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: operator + labels: + app: stackgres-operator + group: stackgres.io + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: REMOVE_OLD_OPERATOR_BUNDLE_RESOURCES + value: 'true' + - name: INSTALL_CONFIG + value: 'true' + - name: DISABLE_RESTAPI_SERVICE_ACCOUNT_IF_NOT_EXISTS + value: 'true' + - name: OPERATOR_CERT_FILE + value: /tmp/k8s-webhook-server/serving-certs/tls.crt + - name: OPERATOR_KEY_FILE + value: /tmp/k8s-webhook-server/serving-certs/tls.key + - name: OPERATOR_NAME + value: stackgres-operator + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: OPERATOR_IMAGE_VERSION + value: 1.15.0-rc1 + - name: OPERATOR_JVM_IMAGE_VERSION + value: 1.15.0-rc1-jvm + - name: OPERATOR_NATIVE_IMAGE_VERSION + value: 1.15.0-rc1 + - name: OPERATOR_SERVICE_ACCOUNT + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.serviceAccountName + - name: OPERATOR_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: OPERATOR_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: OPERATOR_VERSION + value: 1.15.0-rc1 + - name: ALLOWED_NAMESPACES + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + image: quay.io/stackgres/operator:1.15.0-rc1 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /q/health/live + port: 8080 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 60 + timeoutSeconds: 10 + name: operator + ports: + - containerPort: 8080 + name: http + protocol: TCP + - containerPort: 8443 + name: https + protocol: TCP + readinessProbe: + httpGet: + path: /q/health/ready + port: 8080 + scheme: HTTP + periodSeconds: 2 + timeoutSeconds: 1 + resources: + limits: + cpu: '1' + memory: 512Mi + requests: + cpu: 10m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 1000 + 
runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + securityContext: + runAsNonRoot: true + serviceAccountName: stackgres-operator + terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: stackgres-operator-cert + permissions: + - rules: + - apiGroups: + - '' + - apps + - extensions + - rbac.authorization.k8s.io + - batch + resources: + - pods + - pods/exec + - pods/log + - services + - endpoints + - endpoints/restricted + - persistentvolumeclaims + - configmaps + - secrets + - deployments + - statefulsets + - serviceaccounts + - namespaces + - roles + - rolebindings + - events + - cronjobs + - jobs + verbs: + - get + - list + - watch + - update + - create + - delete + - deletecollection + - patch + - apiGroups: + - stackgres.io + resources: + - sgclusters + - sgpgconfigs + - sginstanceprofiles + - sgpoolconfigs + - sgbackupconfigs + - sgbackups + - sgdistributedlogs + - sgdbops + - sgobjectstorages + - sgscripts + - sgshardedclusters + - sgshardedbackups + - sgshardeddbops + - sgstreams + - sgconfigs + verbs: + - create + - watch + - list + - get + - update + - patch + - delete + - apiGroups: + - stackgres.io + resources: + - sgconfigs/status + - sgclusters/status + - sgdistributedlogs/status + - sgclusters/finalizers + - sgpgconfigs/finalizers + - sginstanceprofiles/finalizers + - sgpoolconfigs/finalizers + - sgbackupconfigs/finalizers + - sgbackups/finalizers + - sgdistributedlogs/finalizers + - sgdbops/finalizers + - sgobjectstorages/finalizers + - sgscripts/finalizers + - sgshardedclusters/finalizers + - sgshardedbackups/finalizers + - sgshardeddbops/finalizers + - sgstreams/finalizers + - sgconfigs/finalizers + verbs: + - update + - apiGroups: + - '' + - apps + - batch + resources: + - statefulsets/finalizers + - persistentvolumeclaims/finalizers + - deployments/finalizers + - services/finalizers + - endpoints/finalizers + - cronjobs/finalizers + - jobs/finalizers + - pods/finalizers + verbs: + - update + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - list + - get + - watch + - create + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - podmonitors + verbs: + - list + - get + - create + - delete + - update + - patch + - apiGroups: + - monitoring.coreos.com + resources: + - prometheus + - prometheuses + - podmonitors + verbs: + - list + - get + - apiGroups: + - shardingsphere.apache.org + resources: + - computenodes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - keda.sh + resources: + - scaledobjects + - triggerauthentications + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - autoscaling.k8s.io + resources: + - verticalpodautoscalers + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - serving.knative.dev + resources: + - services + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - operators.coreos.com + resources: + - operators + verbs: + - list + - get + serviceAccountName: stackgres-operator + - rules: + - apiGroups: + - '' + resources: + - secrets + verbs: + - get + - list + serviceAccountName: stackgres-restapi + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: true + type: MultiNamespace + - supported: true + 
type: AllNamespaces + keywords: + - postgresql + - postgres + - database + - sql + - rdbms + - open source + - ongres + labels: + operatorframework.io/arch.amd64: supported + operatorframework.io/arch.arm64: supported + operatorframework.io/os.linux: supported + links: + - name: StackGres Web + url: https://stackgres.io + - name: StackGres Docs + url: https://stackgres.io/doc + maintainers: + - email: stackgres@ongres.com + name: OnGres + maturity: stable + minKubeVersion: 1.18.0 + provider: + name: OnGres + url: https://ongres.com + version: 1.15.0-rc1 + webhookdefinitions: + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgbackup.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgbackups + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgbackup + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgbackup.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgbackups + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgbackup + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgbackupconfig.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgbackupconfigs + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgbackupconfig + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgbackupconfig.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgbackupconfigs + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgbackupconfig + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgcluster.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgclusters + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgcluster + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgcluster.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgclusters + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgcluster + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgdbops.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgdbops + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: 
/stackgres/mutation/sgdbops + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgdbops.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgdbops + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgdbops + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgdistributedlogs.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgdistributedlogs + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgdistributedlogs + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgdistributedlogs.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgdistributedlogs + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgdistributedlogs + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sginstanceprofile.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sginstanceprofiles + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sginstanceprofile + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sginstanceprofile.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sginstanceprofiles + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sginstanceprofile + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgobjectstorage.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgobjectstorages + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgobjectstorage + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgobjectstorage.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgobjectstorages + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgobjectstorage + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgpgconfig.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgpgconfigs + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: 
/stackgres/mutation/sgpgconfig + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgpgconfig.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgpgconfigs + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgpgconfig + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgpoolconfig.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgpoolconfigs + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgpoolconfig + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgpoolconfig.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgpoolconfigs + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgpoolconfig + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgscripts.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgscripts + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgscript + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgscripts.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgscripts + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgscript + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgshardedbackups.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgshardedbackups + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgshardedbackup + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgshardedbackups.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgshardedbackups + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgshardedbackup + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgshardedclusters.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgshardedclusters + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgshardedcluster + - admissionReviewVersions: + - v1 + 
containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgshardedclusters.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgshardedclusters + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgshardedcluster + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgshardeddbops.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgshardeddbops + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgshardeddbops + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgshardeddbops.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgshardeddbops + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgshardeddbops + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgstreams.mutating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - sgstreams + sideEffects: None + targetPort: 8443 + type: MutatingAdmissionWebhook + webhookPath: /stackgres/mutation/sgstreams + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: stackgres-operator + failurePolicy: Fail + generateName: sgstreams.validating-webhook.stackgres.io + rules: + - apiGroups: + - stackgres.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + - DELETE + resources: + - sgstreams + sideEffects: None + targetPort: 8443 + type: ValidatingAdmissionWebhook + webhookPath: /stackgres/validation/sgstreams + relatedImages: + - image: quay.io/ongres/kubectl:v1.31.3-build-6.38 + name: kubectl_1_30_0 + - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0 + name: kube-rbac-proxy_0_13_0 + - image: quay.io/stackgres/operator:1.15.0-rc1 + name: stackgres-operator + - image: quay.io/stackgres/restapi:1.15.0-rc1 + name: stackgres-restapi + - image: quay.io/stackgres/admin-ui:1.15.0-rc1 + name: stackgres-admin-ui + - image: quay.io/stackgres/jobs:1.15.0-rc1 + name: stackgres-jobs diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgbackups.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgbackups.yaml new file mode 100644 index 00000000000..781a7e4a444 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgbackups.yaml @@ -0,0 +1,950 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgbackups.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGBackup + listKind: SGBackupList + plural: sgbackups + singular: sgbackup + shortNames: + - sgbkp + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: cluster + type: string + jsonPath: .spec.sgCluster + - name: managed + type: string + jsonPath: .spec.managedLifecycle + - name: status + type: string + jsonPath: .status.process.status + - name: pg-version + type: string + jsonPath: 
.status.backupInformation.postgresVersion + priority: 1 + - name: compressed-size + type: integer + format: byte + jsonPath: .status.backupInformation.size.compressed + priority: 1 + - name: timeline + type: string + jsonPath: .status.backupInformation.timeline + priority: 1 + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + description: "A manual or automatically generated backup of an SGCluster\ + \ configured with backups.\n\nWhen an SGBackup is created, a Job will perform\ + \ a full backup of the database and update the status of the SGBackup\n\ + \ with all the information required to restore it and some stats (or\ + \ a failure message in case something unexpected\n happened).\nBackups\ + \ generated by SGBackup are stored in the object storage configured with\ + \ an SGObjectStorage together with the WAL\n files or in a [VolumeSnapshot](https://kubernetes.io/docs/concepts/storage/volume-snapshots/)\ + \ (separated from the WAL files that will still be stored in an object\ + \ storage)\n depending on the backup configuration of the targeted SGCluster.\n\ + After an SGBackup is created, the same Job performs a reconciliation of\ + \ the backups by applying the retention window\n that has been configured\ + \ in the SGCluster and removing the backups with managed lifecycle and\ + \ the WAL files older\n than the ones that fit in the retention window.\ + \ The reconciliation also removes backups (excluding WAL files) that do\n\ + \ not belong to any SGBackup (including copies). If the target storage\ + \ is changed, deletion of an SGBackup's backups with\n managed lifecycle\ + \ and the WAL files older than the ones that fit in the retention window\ + \ and of backups that do not\n belong to any SGBackup will not be performed\ + \ anymore on the previous storage, only on the new target storage.\nIf\ + \ the reconciliation of backups fails, the backup itself does not fail and\ + \ will be re-tried the next time an SGBackup\n or scheduled backup Job\ + \ takes place.\n" + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 56 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + description: 'Name of the backup. Following [Kubernetes naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 subdomain, that is, up to 253 characters + consisting of one or more lowercase labels separated by `.`. Where + each label is an alphanumeric (a-z, and 0-9) string, with the + `-` character allowed anywhere except the first or last character. + + + The name must be unique across all StackGres backups in the same + namespace. + + ' + spec: + type: object + properties: + sgCluster: + type: string + description: "The name of the `SGCluster` from which this backup\ + \ is/will be taken.\n\nIf this is a copy of an existing completed\ + \ backup in a different namespace\n the value must be prefixed\ + \ with the namespace of the source backup and a\n dot `.` (e.g.\ + \ `.`) or have the same value\n\ + \ if the source backup is also a copy.\n" + managedLifecycle: + type: boolean + description: "Indicate if this backup is not permanent and should\ + \ be removed by the automated\n retention policy. Default is `false`.\n" + timeout: + type: integer + description: 'Allow to set a timeout for the backup creation. + + + If not set it will be disabled and the backup operation will continue + until the backup completes or fails. 
If set to 0 it is the same as + not being set. + + + Make sure to set a reasonably high value in order to allow for + any unexpected delays during backup creation (network low bandwidth, + disk low throughput and so forth). + + ' + reconciliationTimeout: + type: integer + default: 300 + description: "Allow to set a timeout for the reconciliation process\ + \ that takes place after the backup.\n\nIf not set, defaults to\ + \ 300 (5 minutes). If set to 0 it will disable timeout.\n\nFailure\ + \ of reconciliation will not make the backup fail and will be\ + \ re-tried the next time an SGBackup\n or scheduled backup Job\ + \ takes place.\n" + maxRetries: + type: integer + description: 'The maximum number of retries the backup operation + is allowed to do after a failure. + + + A value of `0` (zero) means no retries are made. Defaults to: + `3`. + + ' + status: + type: object + properties: + internalName: + type: string + description: 'The name of the backup. + + ' + backupPath: + type: string + description: 'The path where the backup is stored. + + ' + process: + type: object + properties: + status: + type: string + description: 'Status of the backup. + + ' + failure: + type: string + description: 'If the status is `failed`, this field will contain + a message indicating the failure reason. + + ' + jobPod: + type: string + description: 'Name of the pod assigned to the backup. StackGres + utilizes internally a locking mechanism based on the pod name + of the job that creates the backup. + + ' + managedLifecycle: + type: boolean + description: 'Status (may be transient) until converging to + `spec.managedLifecycle`. + + ' + timing: + type: object + properties: + start: + type: string + description: 'Start time of backup. + + ' + end: + type: string + description: 'End time of backup. + + ' + stored: + type: string + description: 'Time at which the backup is safely stored + in the object storage. + + ' + backupInformation: + type: object + properties: + hostname: + type: string + description: 'Hostname of the instance where the backup is taken + from. + + ' + sourcePod: + type: string + description: 'Pod where the backup is taken from. + + ' + systemIdentifier: + type: string + description: 'Postgres *system identifier* of the cluster this + backup is taken from. + + ' + postgresVersion: + type: string + description: 'Postgres version of the server where the backup + is taken from. + + ' + pgData: + type: string + description: 'Data directory where the backup is taken from. + + ' + size: + type: object + properties: + uncompressed: + type: integer + format: int64 + description: 'Size (in bytes) of the uncompressed backup. + + ' + compressed: + type: integer + format: int64 + description: 'Size (in bytes) of the compressed backup. + + ' + lsn: + type: object + properties: + start: + type: string + description: 'LSN of when the backup started. + + ' + end: + type: string + description: 'LSN of when the backup finished. + + ' + startWalFile: + type: string + description: 'WAL segment file name when the backup was started. + + ' + timeline: + type: string + description: 'Backup timeline. + + ' + controlData: + type: object + description: 'An object containing data from the output of pg_controldata + on the backup. 
+ + ' + properties: + pg_control version number: + type: string + Catalog version number: + type: string + Database system identifier: + type: string + Database cluster state: + type: string + pg_control last modified: + type: string + Latest checkpoint location: + type: string + Latest checkpoint's REDO location: + type: string + Latest checkpoint's REDO WAL file: + type: string + Latest checkpoint's TimeLineID: + type: string + Latest checkpoint's PrevTimeLineID: + type: string + Latest checkpoint's full_page_writes: + type: string + Latest checkpoint's NextXID: + type: string + Latest checkpoint's NextOID: + type: string + Latest checkpoint's NextMultiXactId: + type: string + Latest checkpoint's NextMultiOffset: + type: string + Latest checkpoint's oldestXID: + type: string + Latest checkpoint's oldestXID's DB: + type: string + Latest checkpoint's oldestActiveXID: + type: string + Latest checkpoint's oldestMultiXid: + type: string + Latest checkpoint's oldestMulti's DB: + type: string + Latest checkpoint's oldestCommitTsXid: + type: string + Latest checkpoint's newestCommitTsXid: + type: string + Time of latest checkpoint: + type: string + Fake LSN counter for unlogged rels: + type: string + Minimum recovery ending location: + type: string + Min recovery ending loc's timeline: + type: string + Backup start location: + type: string + Backup end location: + type: string + End-of-backup record required: + type: string + wal_level setting: + type: string + wal_log_hints setting: + type: string + max_connections setting: + type: string + max_worker_processes setting: + type: string + max_wal_senders setting: + type: string + max_prepared_xacts setting: + type: string + max_locks_per_xact setting: + type: string + track_commit_timestamp setting: + type: string + Maximum data alignment: + type: string + Database block size: + type: string + Blocks per segment of large relation: + type: string + WAL block size: + type: string + Bytes per WAL segment: + type: string + Maximum length of identifiers: + type: string + Maximum columns in an index: + type: string + Maximum size of a TOAST chunk: + type: string + Size of a large-object chunk: + type: string + Date/time type storage: + type: string + Float4 argument passing: + type: string + Float8 argument passing: + type: string + Data page checksum version: + type: string + Mock authentication nonce: + type: string + sgBackupConfig: + type: object + description: The backup configuration used to perform this backup. + properties: + baseBackups: + type: object + description: 'Back backups configuration. + + ' + properties: + cronSchedule: + type: string + description: 'Continuous Archiving backups are composed + of periodic *base backups* and all the WAL segments produced + in between those base backups. This parameter specifies + at what time and with what frequency to start performing + a new base backup. + + + Use cron syntax (`m h dom mon dow`) for this parameter, + i.e., 5 values separated by spaces: + + * `m`: minute, 0 to 59 + + * `h`: hour, 0 to 23 + + * `dom`: day of month, 1 to 31 (recommended not to set + it higher than 28) + + * `mon`: month, 1 to 12 + + * `dow`: day of week, 0 to 7 (0 and 7 both represent + Sunday) + + + Also ranges of values (`start-end`), the symbol `*` (meaning + `first-last`) or even `*/N`, where `N` is a number, meaning + every `N`, may be used. All times are UTC. It is recommended + to avoid 00:00 as base backup time, to avoid overlapping + with any other external operations happening at this time. 
+ + ' + retention: + type: integer + minimum: 1 + description: 'Based on this parameter, an automatic retention + policy is defined to delete old base backups. + + This parameter specifies the number of base backups to + keep, in a sliding window. + + Consequently, the time range covered by backups is `periodicity*retention`, + where `periodicity` is the separation between backups + as specified by the `cronSchedule` property. + + + Default is 5. + + ' + compression: + type: string + description: 'Select the backup compression algorithm. Possible + options are: lz4, lzma, brotli. The default method is + `lz4`. LZ4 is the fastest method, but compression ratio + is the worst. LZMA is way slower, but it compresses backups + about 6 times better than LZ4. Brotli is a good trade-off + between speed and compression ratio, being about 3 times + better than LZ4. + + ' + enum: + - lz4 + - lzma + - brotli + performance: + type: object + properties: + maxNetworkBandwitdh: + type: integer + description: '**Deprecated**: use instead maxNetworkBandwidth. + + + Maximum storage upload bandwidth to be used when storing + the backup. In bytes (per second). + + ' + maxDiskBandwitdh: + type: integer + description: '**Deprecated**: use instead maxDiskBandwidth. + + + Maximum disk read I/O when performing a backup. In + bytes (per second). + + ' + maxNetworkBandwidth: + type: integer + description: 'Maximum storage upload bandwidth to be + used when storing the backup. In bytes (per second). + + ' + maxDiskBandwidth: + type: integer + description: 'Maximum disk read I/O when performing + a backup. In bytes (per second). + + ' + uploadDiskConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to store the data. This parameter configures + the number of parallel streams to use to reading from + disk. By default, it''s set to 1 (use one stream). + + ' + uploadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to store the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to 1 (use one stream). + + ' + compression: + type: string + description: 'Select the backup compression algorithm. Possible + options are: lz4, lzma, brotli. The default method is `lz4`. + LZ4 is the fastest method, but compression ratio is the worst. + LZMA is way slower, but it compresses backups about 6 times + better than LZ4. Brotli is a good trade-off between speed + and compression ratio, being about 3 times better than LZ4. + + ' + enum: + - lz4 + - lzma + - brotli + storage: + type: object + description: 'Backup storage configuration. + + ' + properties: + type: + type: string + enum: + - s3 + - s3Compatible + - gcs + - azureBlob + description: 'Specifies the type of object storage used + for storing the base backups and WAL segments. + + Possible values: + + * `s3`: Amazon Web Services S3 (Simple Storage Service). + + * `s3Compatible`: non-AWS services that implement a compatibility + API with AWS S3. + + * `gcs`: Google Cloud Storage. + + * `azureBlob`: Microsoft Azure Blob Storage. + + ' + s3: + type: object + description: 'Amazon Web Services S3 configuration. + + ' + properties: + bucket: + type: string + pattern: ^[^/]+(/[^/]*)*$ + description: 'AWS S3 bucket name. + + ' + path: + type: string + pattern: ^(/[^/]*)*$ + description: 'Optional path within the S3 bucket. 
Note + that StackGres generates in any case a folder per + + StackGres cluster, using the `SGCluster.metadata.name`. + + ' + region: + type: string + description: 'AWS S3 region. The Region may be detected + using s3:GetBucketLocation, but to avoid giving permissions + to this API call or forbid it from the applicable + IAM policy, this property must be explicitly specified. + + ' + storageClass: + type: string + description: '[Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) + used for the backup object storage. By default, the + `STANDARD` storage class is used. Other supported + values include `STANDARD_IA` for Infrequent Access + and `REDUCED_REDUNDANCY`. + + ' + awsCredentials: + type: object + description: 'Credentials to access AWS S3 for writing + and reading. + + ' + properties: + secretKeySelectors: + type: object + description: 'Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)s + to reference the Secrets that contain the information + about the `awsCredentials`. + + ' + properties: + accessKeyId: + type: object + description: '[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + containing the AWS Access Key ID secret. + + ' + properties: + key: + type: string + description: 'The key of the secret to select + from. Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + secretAccessKey: + type: object + description: '[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + containing the AWS Secret Access Key secret. + + ' + properties: + key: + type: string + description: 'The key of the secret to select + from. Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + required: + - accessKeyId + - secretAccessKey + required: + - secretKeySelectors + required: + - bucket + - awsCredentials + s3Compatible: + type: object + description: AWS S3-Compatible API configuration + properties: + bucket: + type: string + pattern: ^[^/]+(/[^/]*)*$ + description: 'Bucket name. + + ' + path: + type: string + pattern: ^(/[^/]*)*$ + description: 'Optional path within the S3 bucket. Note + that StackGres generates in any case a folder per + StackGres cluster, using the `SGCluster.metadata.name`. + + ' + enablePathStyleAddressing: + type: boolean + description: 'Enable path-style addressing (i.e. `http://s3.amazonaws.com/BUCKET/KEY`) + when connecting to an S3-compatible service that lacks + support for sub-domain style bucket URLs (i.e. `http://BUCKET.s3.amazonaws.com/KEY`). + Defaults to false. + + ' + endpoint: + type: string + description: 'Overrides the default URL to connect to + an S3-compatible service. + + For example: `http://s3-like-service:9000`. + + ' + region: + type: string + description: 'AWS S3 region. The Region may be detected + using s3:GetBucketLocation, but to avoid giving permissions + to this API call or forbid it from the applicable + IAM policy, this property must be explicitly specified. 
+ + ' + storageClass: + type: string + description: '[Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) + used for the backup object storage. By default, the + `STANDARD` storage class is used. Other supported + values include `STANDARD_IA` for Infrequent Access + and `REDUCED_REDUNDANCY`. + + ' + awsCredentials: + type: object + description: 'Credentials to access AWS S3 for writing + and reading. + + ' + properties: + secretKeySelectors: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + to reference the Secrets that contain the information + about the `awsCredentials`. + + ' + properties: + accessKeyId: + type: object + description: '[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + containing the AWS Access Key ID secret. + + ' + properties: + key: + type: string + description: 'The key of the secret to select + from. Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + secretAccessKey: + type: object + description: '[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + containing the AWS Secret Access Key secret. + + ' + properties: + key: + type: string + description: 'The key of the secret to select + from. Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + required: + - accessKeyId + - secretAccessKey + required: + - secretKeySelectors + required: + - bucket + - awsCredentials + gcs: + type: object + description: 'Google Cloud Storage configuration. + + ' + properties: + bucket: + type: string + pattern: ^[^/]+(/[^/]*)*$ + description: 'GCS bucket name. + + ' + path: + type: string + pattern: ^(/[^/]*)*$ + description: 'Optional path within the GCS bucket. Note + that StackGres generates in any case a folder per + StackGres cluster, using the `SGCluster.metadata.name`. + + ' + gcpCredentials: + type: object + description: 'Credentials to access GCS for writing + and reading. + + ' + properties: + fetchCredentialsFromMetadataService: + type: boolean + description: 'If true, the credentials will be fetched + from the GCE/GKE metadata service and the credentials + from `secretKeySelectors` field will not be used. + + + This is useful when running StackGres inside a + GKE cluster using [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). + + ' + secretKeySelectors: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + to reference the Secrets that contain the information + about the Service Account to access GCS. + + ' + properties: + serviceAccountJSON: + type: object + description: 'A service account key from GCP. + In JSON format, as downloaded from the GCP + Console. + + ' + properties: + key: + type: string + description: 'The key of the secret to select + from. Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. 
[More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + required: + - serviceAccountJSON + required: + - bucket + - gcpCredentials + azureBlob: + type: object + description: 'Azure Blob Storage configuration. + + ' + properties: + bucket: + type: string + pattern: ^[^/]+(/[^/]*)*$ + description: 'Azure Blob Storage bucket name. + + ' + path: + type: string + pattern: ^(/[^/]*)*$ + description: 'Optional path within the Azure Blobk bucket. + Note that StackGres generates in any case a folder + per StackGres cluster, using the `SGCluster.metadata.name`. + + ' + azureCredentials: + type: object + description: 'Credentials to access Azure Blob Storage + for writing and reading. + + ' + properties: + secretKeySelectors: + type: object + description: 'Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)s + to reference the Secrets that contain the information + about the `azureCredentials`. + + ' + properties: + storageAccount: + type: object + description: '[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + containing the name of the storage account. + + ' + properties: + key: + type: string + description: 'The key of the secret to select + from. Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + accessKey: + type: object + description: '[SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + containing the primary or secondary access + key for the storage account. + + ' + properties: + key: + type: string + description: 'The key of the secret to select + from. Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + required: + - storageAccount + - accessKey + required: + - bucket + - azureCredentials + required: + - type + required: + - storage + volumeSnapshot: + type: object + description: The volume snapshot configuration used to restore this + backup. + properties: + name: + type: string + description: 'The volume snapshot used to store this backup. 
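To make the `storage` section above more concrete, here is a hypothetical `s3Compatible` configuration pointing at a MinIO-style endpoint. The bucket, endpoint, and Secret names are placeholders; only the field names come from the schema.

```yaml
# Hypothetical s3Compatible storage block; bucket, endpoint and Secret names
# are placeholders.
storage:
  type: s3Compatible
  s3Compatible:
    bucket: my-backups
    endpoint: http://minio.example.svc:9000
    enablePathStyleAddressing: true
    region: us-east-1
    awsCredentials:
      secretKeySelectors:
        accessKeyId:
          name: minio-credentials
          key: accesskey
        secretAccessKey:
          name: minio-credentials
          key: secretkey
```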
+ + ' + backupLabel: + type: string + description: 'The content of `backup_label` column returned + by `pg_backup_stop` encoded in Base64 + + ' + tablespaceMap: + type: string + description: 'The content of `tablespace_map` column returned + by `pg_backup_stop` encoded in Base64 + + ' diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgclusters.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgclusters.yaml new file mode 100644 index 00000000000..1c5081e0310 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgclusters.yaml @@ -0,0 +1,9872 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgclusters.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGCluster + listKind: SGClusterList + plural: sgclusters + singular: sgcluster + shortNames: + - sgclu + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: version + type: string + jsonPath: .spec.postgres.version + - name: instances + type: integer + jsonPath: .spec.instances + - name: Profile + type: string + jsonPath: .spec.sgInstanceProfile + - name: Disk + type: string + jsonPath: .spec.pods.persistentVolume.size + - name: prometheus-autobind + type: string + jsonPath: .spec.configurations.observability.prometheusAutobind + priority: 1 + - name: pool-config + type: string + jsonPath: .spec.configurations.sgPoolingConfig + priority: 1 + - name: postgres-config + type: string + jsonPath: .spec.configurations.sgPostgresConfig + priority: 1 + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + labelSelectorPath: .status.labelSelector + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 44 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + description: 'Name of the StackGres cluster. Following [Kubernetes + naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 subdomain, that is, up to 253 characters + consisting of one or more lowercase labels separated by `.`. Where + each label is an alphanumeric (a-z, and 0-9) string, with the + `-` character allowed anywhere except the first or last character. + + + The name must be unique across all SGCluster, SGShardedCluster + and SGDistributedLogs in the same namespace. + + ' + spec: + type: object + description: 'Specification of the desired behavior of a StackGres cluster. 
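Before the individual `spec` properties below, a minimal SGCluster that satisfies the required fields (`instances`, `postgres`, `pods`) may serve as a reference point. The name and volume size are placeholders; everything not listed falls back to the defaults documented in the schema.

```yaml
# Minimal sketch of an SGCluster; the name and size are placeholders.
apiVersion: stackgres.io/v1
kind: SGCluster
metadata:
  name: demo
spec:
  instances: 2
  postgres:
    version: '16'     # or 'latest', or an exact major.minor such as '14.4'
  pods:
    persistentVolume:
      size: 10Gi
```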
+ + ' + required: + - instances + - postgres + - pods + properties: + profile: + type: string + description: "The profile allow to change in a convenient place\ + \ a set of configuration defaults that affect how the cluster\ + \ is generated.\n\nAll those defaults can be overwritten by setting\ + \ the correspoinding fields.\n\nAvailable profiles are:\n\n* `production`:\n\ + \n Prevents two Pods from running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `false` by default).\n Sets both limits and requests using\ + \ `SGInstanceProfile` for `patroni` container that runs both Patroni\ + \ and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `false` by default).\n Sets requests using the referenced\ + \ `SGInstanceProfile` for sidecar containers other than `patroni`\ + \ (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `false` by default).\n\n* `testing`:\n\n Allows two Pods\ + \ to running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `true` by default).\n Sets both limits and requests using\ + \ `SGInstanceProfile` for `patroni` container that runs both Patroni\ + \ and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `false` by default).\n Sets requests using the referenced\ + \ `SGInstanceProfile` for sidecar containers other than `patroni`\ + \ (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `false` by default).\n\n* `development`:\n\n Allows two\ + \ Pods from running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ + \ to `true` by default).\n Unset both limits and requests for\ + \ `patroni` container that runs both Patroni and Postgres (set\ + \ `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ + \ to `true` by default).\n Unsets requests for sidecar containers\ + \ other than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ + \ to `true` by default).\n\n**Changing this field may require\ + \ a restart.**\n" + default: production + postgres: + type: object + description: 'This section allows to configure Postgres features + + ' + required: + - version + properties: + version: + type: string + description: 'Postgres version used on the cluster. It is either + of: + + * The string ''latest'', which automatically sets the latest + major.minor Postgres version. + + * A major version, like ''14'' or ''13'', which sets that + major version and the latest minor version. + + * A specific major.minor version, like ''14.4''. + + ' + flavor: + type: string + description: "Postgres flavor used on the cluster. It is either\ + \ of:\n\n * `vanilla` will use the [Official Postgres](https://www.postgresql.org/)\n\ + \ * `babelfish` will use the [Babelfish for Postgres](https://babelfish-for-postgresql.github.io/babelfish-for-postgresql/).\n\ + \nIf not specified then the vanilla Postgres will be used\ + \ for the cluster.\n\n**This field can only be set on creation.**\n" + default: vanilla + extensions: + type: array + description: "StackGres support deploy of extensions at runtime\ + \ by simply adding an entry to this array. A deployed extension\ + \ still\nrequires the creation in a database using the [`CREATE\ + \ EXTENSION`](https://www.postgresql.org/docs/current/sql-createextension.html)\n\ + statement. 
After an extension is deployed correctly it will\ + \ be present until removed and the cluster restarted.\n\n\ + A cluster restart is required for:\n* Extensions that requires\ + \ to add an entry to [`shared_preload_libraries`](https://postgresqlco.nf/en/doc/param/shared_preload_libraries/)\ + \ configuration parameter.\n* Upgrading extensions that overwrite\ + \ any file that is not the extension''s control file or extension''s\ + \ script file.\n* Removing extensions. Until the cluster is\ + \ not restarted a removed extension will still be available.\n\ + * Install of extensions that require extra mount. After installed\ + \ the cluster will require to be restarted.\n\n**Example:**\n\ + \n``` yaml\napiVersion: stackgres.io/v1\nkind: SGCluster\n\ + metadata:\n name: stackgres\nspec:\n postgres:\n extensions:\n\ + \ - {name: 'timescaledb', version: '2.3.1'}\n```\n" + items: + type: object + required: + - name + properties: + name: + type: string + description: The name of the extension to deploy. + publisher: + type: string + description: The id of the publisher of the extension + to deploy. If not specified `com.ongres` will be used + by default. + default: com.ongres + version: + type: string + description: The version of the extension to deploy. If + not specified version of `stable` channel will be used + by default and if only a version is available that one + will be used. + repository: + type: string + description: 'The repository base URL from where to obtain + the extension to deploy. + + + **This section is filled by the operator.** + + ' + ssl: + type: object + description: "This section allows to use SSL when connecting\ + \ to Postgres\n\n**Example:**\n\n```yaml\napiVersion: stackgres.io/v1\n\ + kind: SGCluster\nmetadata:\n name: stackgres\nspec:\n postgres:\n\ + \ ssl:\n enabled: true\n certificateSecretKeySelector:\n\ + \ name: stackgres-secrets\n key: cert\n \ + \ privateKeySecretKeySelector:\n name: stackgres-secrets\n\ + \ key: key\n```\n" + properties: + enabled: + type: boolean + description: 'Allow to enable SSL for connections to Postgres. + By default is `false`. + + + If `true` certificate and private key will be auto-generated + unless fields `certificateSecretKeySelector` and `privateKeySecretKeySelector` + are specified. + + ' + certificateSecretKeySelector: + type: object + description: 'Secret key selector for the certificate or + certificate chain used for SSL connections. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The name of Secret that contains the certificate + or certificate chain for SSL connections + + ' + key: + type: string + description: 'The key of Secret that contains the certificate + or certificate chain for SSL connections + + ' + privateKeySecretKeySelector: + type: object + description: 'Secret key selector for the private key used + for SSL connections. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The name of Secret that contains the private + key for SSL connections + + ' + key: + type: string + description: 'The key of Secret that contains the private + key for SSL connections + + ' + instances: + type: integer + minimum: 0 + description: "Number of instances for the StackGres cluster. 
Each\ + \ instance is a Pod containing one Postgres server.\n Out of\ + \ all of the Postgres servers, one is elected as the primary,\ + \ the rest remain as read-only replicas.\n" + autoscaling: + type: object + description: 'This section allows to configure horizontal and vertical + Pod autoscaling for the SGCluster''s Pods. + + + Horizontal Pod Autoscaling will use replicas connections usage + (active connections / max connections) as the metric to control + the upscale or downscale of the replicas. + + Horizontal Pod Autoscaling require the [KEDA operator](https://github.com/kedacore/keda) + to be installed in the Kuberentes cluster. + + + Vertical Pod Autoscaling will use cpu and memory usage as the + metric to control the upscale or downscale of the Pod requests + and limits resources. + + Vertical Pod Autoscaling requires the [Vertical Pod Autoscaler + operator](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) + to be installed in the Kuberentes cluster. + + ' + properties: + mode: + type: string + description: 'Allow to enable or disable any of horizontal and + vertical Pod autoscaling. + + + Possible values are: + + * `all`: both horizontal and vertical Pod autoscaling will + be enabled (default) + + * `horizontal`: only horizontal Pod autoscaling will be enabled + + * `vertical`: only vertical Pod autoscaling will be enabled + + * `none`: all autoscaling will be disabled + + ' + default: all + minInstances: + type: integer + description: 'The total minimum number of instances that the + SGCluster will have (including the primary instance). + + + This field is ignored when horizontal Pod autoscaling is disabled. + + ' + minimum: 2 + maxInstances: + type: integer + description: 'The total maximum number of instances that the + SGCluster will have (including the primary instance). + + + This field is ignored when horizontal Pod autoscaling is disabled. 
+ + ' + minimum: 2 + minAllowed: + type: object + description: 'Allow to define the lower bound for Pod resources + of patroni, pgbouncer and envoy containers + + ' + properties: + patroni: + type: object + description: 'Allow to define the lower bound for Pod resources + of patroni container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the patroni + container + memory: + type: string + description: The minimum allowed memory for the patroni + container + pgbouncer: + type: object + description: 'Allow to define the lower bound for Pod resources + of pgbouncer container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the pgbouncer + container + memory: + type: string + description: The minimum allowed memory for the pgbouncer + container + envoy: + type: object + description: 'Allow to define the lower bound for Pod resources + of envoy container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the envoy container + memory: + type: string + description: The minimum allowed memory for the envoy + container + maxAllowed: + type: object + description: 'Allow to define the higher bound for Pod resources + of patroni, pgbouncer and envoy containers + + ' + properties: + patroni: + type: object + description: 'Allow to define the higher bound for Pod resources + of patroni container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the patroni + container + memory: + type: string + description: The maximum allowed memory for the patroni + container + pgbouncer: + type: object + description: 'Allow to define the higher bound for Pod resources + of pgbouncer container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the pgbouncer + container + memory: + type: string + description: The maximum allowed memory for the pgbouncer + container + envoy: + type: object + description: 'Allow to define the higher bound for Pod resources + of envoy container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the envoy container + memory: + type: string + description: The maximum allowed memory for the envoy + container + horizontal: + type: object + description: 'Section to configure horizontal Pod autoscaling + aspects. + + ' + properties: + replicasConnectionsUsageTarget: + type: string + description: 'The target value for replicas connections + used in order to trigger the upscale of replica instances. + + ' + default: '0.8' + replicasConnectionsUsageMetricType: + type: string + description: 'The metric type for connections used metric. + See https://keda.sh/docs/latest/concepts/scaling-deployments/#triggers + + ' + default: AverageValue + cooldownPeriod: + type: integer + description: 'The period in seconds before the downscale + of replica instances can be triggered. + + ' + default: 300 + pollingInterval: + type: integer + description: 'The interval in seconds to check if the scaleup + or scaledown have to be triggered. + + ' + default: 30 + vertical: + type: object + description: 'Section to configure vertical Pod autoscaling + aspects. + + ' + properties: + recommender: + type: string + description: 'Recommender responsible for generating recommendation + for vertical Pod autoscaling. If not specified the default + one will be used. 
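Pulling the autoscaling fields described above together, a sketch that enables both horizontal scaling (KEDA-based, driven by replica connection usage) and vertical bounds for the `patroni` container could look as follows. All numbers are illustrative.

```yaml
# Illustrative autoscaling section; all values are placeholders.
autoscaling:
  mode: all                 # all | horizontal | vertical | none
  minInstances: 2
  maxInstances: 5
  horizontal:
    replicasConnectionsUsageTarget: '0.8'
    cooldownPeriod: 300
    pollingInterval: 30
  minAllowed:
    patroni:
      cpu: 500m
      memory: 1Gi
  maxAllowed:
    patroni:
      cpu: '4'
      memory: 8Gi
```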
+ + ' + replication: + type: object + description: "This section allows to configure Postgres replication\ + \ mode and HA roles groups.\n\nThe main replication group is implicit\ + \ and contains the total number of instances less the sum of all\n\ + \ instances in other replication groups.\n\nThe total number\ + \ of instances is always specified by `.spec.instances`.\n" + properties: + mode: + type: string + description: "The replication mode applied to the whole cluster.\n\ + Possible values are:\n* `async` (default)\n* `sync`\n* `strict-sync`\n\ + * `sync-all`\n* `strict-sync-all`\n\n**async**\n\nWhen in\ + \ asynchronous mode the cluster is allowed to lose some committed\ + \ transactions.\n When the primary server fails or becomes\ + \ unavailable for any other reason a sufficiently healthy\ + \ standby\n will automatically be promoted to primary. Any\ + \ transactions that have not been replicated to that standby\n\ + \ remain in a \"forked timeline\" on the primary, and are\ + \ effectively unrecoverable (the data is still there,\n but\ + \ recovering it requires a manual recovery effort by data\ + \ recovery specialists).\n\n**sync**\n\nWhen in synchronous\ + \ mode a standby will not be promoted unless it is certain\ + \ that the standby contains all\n transactions that may have\ + \ returned a successful commit status to client (clients can\ + \ change the behavior\n per transaction using PostgreSQL’s\ + \ `synchronous_commit` setting. Transactions with `synchronous_commit`\n\ + \ values of `off` and `local` may be lost on fail over, but\ + \ will not be blocked by replication delays). This\n means\ + \ that the system may be unavailable for writes even though\ + \ some servers are available. System\n administrators can\ + \ still use manual failover commands to promote a standby\ + \ even if it results in transaction\n loss.\n\nSynchronous\ + \ mode does not guarantee multi node durability of commits\ + \ under all circumstances. When no suitable\n standby is\ + \ available, primary server will still accept writes, but\ + \ does not guarantee their replication. When\n the primary\ + \ fails in this mode no standby will be promoted. When the\ + \ host that used to be the primary comes\n back it will get\ + \ promoted automatically, unless system administrator performed\ + \ a manual failover. This behavior\n makes synchronous mode\ + \ usable with 2 node clusters.\n\nWhen synchronous mode is\ + \ used and a standby crashes, commits will block until the\ + \ primary is switched to standalone\n mode. Manually shutting\ + \ down or restarting a standby will not cause a commit service\ + \ interruption. Standby will\n signal the primary to release\ + \ itself from synchronous standby duties before PostgreSQL\ + \ shutdown is initiated.\n\n**strict-sync**\n\nWhen it is\ + \ absolutely necessary to guarantee that each write is stored\ + \ durably on at least two nodes, use the strict\n synchronous\ + \ mode. This mode prevents synchronous replication to be switched\ + \ off on the primary when no synchronous\n standby candidates\ + \ are available. As a downside, the primary will not be available\ + \ for writes (unless the Postgres\n transaction explicitly\ + \ turns off `synchronous_mode` parameter), blocking all client\ + \ write requests until at least one\n synchronous replica\ + \ comes up.\n\n**Note**: Because of the way synchronous replication\ + \ is implemented in PostgreSQL it is still possible to lose\n\ + \ transactions even when using strict synchronous mode. 
If\ + \ the PostgreSQL backend is cancelled while waiting to acknowledge\n\ + \ replication (as a result of packet cancellation due to\ + \ client timeout or backend failure) transaction changes become\n\ + \ visible for other backends. Such changes are not yet replicated\ + \ and may be lost in case of standby promotion.\n\n**sync-all**\n\ + \nThe same as `sync` but `syncInstances` is ignored and the\ + \ number of synchronous instances is equals to the total number\n\ + \ of instances less one.\n\n**strict-sync-all**\n\nThe same\ + \ as `strict-sync` but `syncInstances` is ignored and the\ + \ number of synchronous instances is equals to the total number\n\ + \ of instances less one.\n" + default: async + role: + type: string + description: 'This role is applied to the instances of the implicit + replication group that is composed by `.spec.instances` number + of instances. + + Possible values are: + + * `ha-read` (default) + + * `ha` + + The primary instance will be elected among all the replication + groups that are either `ha` or `ha-read`. + + Only if the role is set to `ha-read` instances of main replication + group will be exposed via the replicas service. + + ' + default: ha-read + syncInstances: + type: integer + minimum: 1 + description: "Number of synchronous standby instances. Must\ + \ be less than the total number of instances. It is set to\ + \ 1 by default.\n Only setteable if mode is `sync` or `strict-sync`.\n" + groups: + type: array + description: "StackGres support replication groups where a replication\ + \ group of a specified number of instances could have different\n\ + \ replication role. The main replication group is implicit\ + \ and contains the total number of instances less the sum\ + \ of all\n instances in other replication groups.\n" + items: + type: object + required: + - role + - instances + properties: + name: + type: string + description: The name of the replication group. If not + set will default to the `group-`. + role: + type: string + description: 'This role is applied to the instances of + this replication group. + + Possible values are: + + * `ha-read` + + * `ha` + + * `readonly` + + * `none` + + The primary instance will be elected among all the replication + groups that are either `ha` or `ha-read`. + + Only if the role is set to `readonly` or `ha-read` instances + of such replication group will be exposed via the replicas + service. + + ' + instances: + type: integer + minimum: 1 + description: "Number of StackGres instances for this replication\ + \ group.\n\nThe total number of instance of a cluster\ + \ is always `.spec.instances`. The sum of the instances\ + \ in all the replication groups must be\n less than\ + \ the total number of instances.\n" + minInstances: + type: integer + minimum: 1 + description: "Minimum number of StackGres instances for\ + \ this replication group. It is ignored when horizontal\ + \ Pod autoscaling is disabled (see `.spec.autoscaling`)\n\ + \nThe total minimum number of instance of a cluster\ + \ is always `.spec.autoscaling.minInstances`. The sum\ + \ of the minimum instances in all the replication groups\ + \ must be\n less than the total minimum number of instances.\n\ + \nWhen this field is set the instances value that is\ + \ provided by the user it is overwritten using the following\ + \ formula to calculate it:\n\n```\n = max(,\ + \ * / )\n```\n" + initialization: + type: object + description: 'Allow to specify how the replicas are initialized. 
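The replication mode, role, `syncInstances`, and `groups` fields described above combine as in this sketch: a synchronous cluster with one synchronous standby plus an extra read-only group. The group name and counts are placeholders, and the instances declared in groups must always sum to less than `.spec.instances`.

```yaml
# Illustrative replication section; the group name and counts are placeholders.
replication:
  mode: sync            # async (default) | sync | strict-sync | sync-all | strict-sync-all
  role: ha-read
  syncInstances: 1      # only settable for sync / strict-sync
  groups:
  - name: analytics
    role: readonly
    instances: 1
```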
+ + ' + properties: + mode: + type: string + description: "Allow to specify how the replicas are initialized.\n\ + \nPossible values are:\n\n* `FromPrimary`: When this mode\ + \ is used replicas will be always created from the primary\ + \ using `pg_basebackup`.\n* `FromReplica`: When this mode\ + \ is used replicas will be created from another existing\ + \ replica using\n `pg_basebackup`. Fallsback to `FromPrimary`\ + \ if there's no replica or it fails.\n* `FromExistingBackup`:\ + \ When this mode is used replicas will be created from\ + \ an existing SGBackup. If `backupNewerThan` is set\n\ + \ the SGBackup must be newer than its value. When this\ + \ mode fails to restore an SGBackup it will try with a\ + \ previous one (if exists).\n Fallsback to `FromReplica`\ + \ if there's no backup left or it fails.\n* `FromNewlyCreatedBackup`:\ + \ When this mode is used replicas will be created from\ + \ a newly created SGBackup.\n Fallsback to `FromExistingBackup`\ + \ if `backupNewerThan` is set and exists a recent backup\ + \ newer than its value or it fails.\n" + default: FromExistingBackup + backupNewerThan: + type: string + description: "An ISO 8601 duration in the format `PnDTnHnMn.nS`,\ + \ that specifies how old an SGBackup have to be in order\ + \ to be seleceted\n to initialize a replica.\n\nWhen `FromExistingBackup`\ + \ mode is set this field restrict the selection of SGBackup\ + \ to be used for recovery newer than the\n specified value.\ + \ \n\nWhen `FromNewlyCreatedBackup` mode is set this field\ + \ skip the creation SGBackup to be used for recovery if\ + \ one newer than\n the specified value exists. \n" + backupRestorePerformance: + type: object + description: 'Configuration that affects the backup network + and disk usage performance during recovery. + + ' + properties: + maxNetworkBandwidth: + type: integer + description: 'Maximum storage upload bandwidth used + when storing a backup. In bytes (per second). + + ' + maxDiskBandwidth: + type: integer + description: 'Maximum disk read I/O when performing + a backup. In bytes (per second). + + ' + downloadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to read the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to the minimum between the number of file + to read and 10. + + ' + sgInstanceProfile: + type: string + description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/reference/crd/sginstanceprofile/). + + + A SGInstanceProfile defines CPU and memory limits. Must exist + before creating a cluster. + + + When no profile is set, a default (1 core, 2 GiB RAM) one is used. + + + **Changing this field may require a restart.** + + ' + metadata: + type: object + description: Metadata information for cluster created resources. + properties: + annotations: + type: object + description: "Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)\ + \ to be passed to resources created and managed by StackGres.\n\ + \n**Example:**\n\n```yaml\napiVersion: stackgres.io/v1\nkind:\ + \ SGCluster\nmetadata:\n name: stackgres\nspec:\n metadata:\n\ + \ annotations:\n clusterPods:\n key: value\n\ + \ primaryService:\n key: value\n replicasService:\n\ + \ key: value\n```\n" + properties: + allResources: + type: object + description: Annotations to attach to any resource created + or managed by StackGres. 
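To round out the replication section, the `initialization` block described above (nested under `replication`) can be sketched as follows; the duration and concurrency values are placeholders, and `FromExistingBackup` is the documented default mode.

```yaml
# Illustrative replica initialization block, nested under replication;
# the duration and concurrency are placeholders.
initialization:
  mode: FromExistingBackup
  backupNewerThan: P1D          # only consider SGBackups newer than one day
  backupRestorePerformance:
    downloadConcurrency: 4
```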
+ additionalProperties: + type: string + clusterPods: + type: object + description: Annotations to attach to pods created or managed + by StackGres. + additionalProperties: + type: string + services: + type: object + description: Annotations to attach to all services created + or managed by StackGres. + additionalProperties: + type: string + primaryService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-primary` service. + additionalProperties: + type: string + replicasService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-replicas` service. + additionalProperties: + type: string + labels: + type: object + description: "Custom Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)\ + \ to be passed to resources created and managed by StackGres.\n\ + \n**Example:**\n\n```yaml\napiVersion: stackgres.io/v1\nkind:\ + \ SGCluster\nmetadata:\n name: stackgres\nspec:\n metadata:\n\ + \ labels:\n clusterPods:\n customLabel: customLabelValue\n\ + \ services:\n customLabel: customLabelValue\n\ + ```\n" + properties: + clusterPods: + type: object + description: Labels to attach to Pods created or managed + by StackGres. + additionalProperties: + type: string + services: + type: object + description: Labels to attach to Services and Endpoints + created or managed by StackGres. + additionalProperties: + type: string + postgresServices: + type: object + description: Kubernetes [services](https://kubernetes.io/docs/concepts/services-networking/service/) + created or managed by StackGres. + properties: + primary: + type: object + description: "Configure the service to the primary with the\ + \ same name as the SGCluster. A legacy service \n\nProvides\ + \ a stable connection (regardless of primary failures or switchovers)\ + \ to the read-write Postgres server of the cluster.\n\nSee\ + \ also https://kubernetes.io/docs/concepts/services-networking/service/\n" + properties: + enabled: + type: boolean + description: Specify if the service should be created or + not. + default: true + type: + type: string + enum: + - ClusterIP + - LoadBalancer + - NodePort + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid + + options are ClusterIP, NodePort, and LoadBalancer. "ClusterIP" + allocates + + a cluster-internal IP address for load-balancing to endpoints. + + "NodePort" builds on ClusterIP and allocates a port on + every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current + cloud). 
+ + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + default: ClusterIP + customPorts: + type: array + description: "The list of custom ports that will be exposed\ + \ by the service.\n\nThe names of custom ports will be\ + \ prefixed with the string `c-` so they do not\n conflict\ + \ with ports defined for the service.\n\nThe names of\ + \ target ports will be prefixed with the string `c-` so\ + \ that the ports\n that can be referenced in this section\ + \ will be only those defined under\n .spec.pods.customContainers[].ports\ + \ sections were names are also prepended with the same\n\ + \ prefix.\n\n**Changing this field may require a restart.**\n\ + \nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#serviceport-v1-core\n" + items: + description: "A custom port that will be exposed by the\ + \ service.\n\nThe name of the custom port will be prefixed\ + \ with the string `c-` so it does not\n conflict with\ + \ ports defined for the service.\n\nThe name of target\ + \ port will be prefixed with the string `c-` so that\ + \ the port\n that can be referenced in this section\ + \ will be only those defined under\n .spec.pods.customContainers[].ports\ + \ sections were names are also prepended with the same\n\ + \ prefix.\n \nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#serviceport-v1-core\n" + properties: + appProtocol: + description: "The application protocol for this port.\ + \ This is used as a hint for implementations to\ + \ offer richer behavior for protocols that they\ + \ understand. This field follows standard Kubernetes\ + \ label syntax. Valid values are either:\n\n* Un-prefixed\ + \ protocol names - reserved for IANA standard service\ + \ names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\ + \n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c'\ + \ - HTTP/2 prior knowledge over cleartext as described\ + \ in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n\ + \ * 'kubernetes.io/ws' - WebSocket over cleartext\ + \ as described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \ * 'kubernetes.io/wss' - WebSocket over TLS as\ + \ described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \n* Other protocols should use implementation-defined\ + \ prefixed names such as mycompany.com/my-custom-protocol." + type: string + name: + description: The name of this port within the service. + This must be a DNS_LABEL. All ports within a ServiceSpec + must have unique names. When considering the endpoints + for a Service, this must match the 'name' field + in the EndpointPort. Optional if only one ServicePort + is defined on this service. + type: string + nodePort: + description: 'The port on each node on which this + service is exposed when type is NodePort or LoadBalancer. Usually + assigned by the system. If a value is specified, + in-range, and not in use it will be used, otherwise + the operation will fail. If not specified, a port + will be allocated if this Service requires one. If + this field is specified when creating a Service + which does not need it, creation will fail. This + field will be wiped when updating a Service to no + longer need it (e.g. changing type from NodePort + to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by this + service. 
+ format: int32 + type: integer + protocol: + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + description: IntOrString is a type that can hold an + int32 or a string. When used in JSON or YAML marshalling + and unmarshalling, it produces or consumes the inner + type. This allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if NodePorts + will be automatically allocated for services with type + LoadBalancer. Default is "true". It may be set to "false" + if the cluster load-balancer does not rely on NodePorts. If + the caller requests specific NodePorts (by specifying + a value), those requests will be respected, regardless + of this field. This field may only be set for services + with type LoadBalancer and will be cleared if the type + is changed to any other type. + type: boolean + externalIPs: + description: 'externalIPs is a list of IP addresses for + which nodes in the cluster will also accept traffic for + this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes system. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#allocateloadbalancernodeports-v1-core' + items: + type: string + type: array + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes distribute + service traffic they receive on one of the Service's "externally-facing" + addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). + If set to "Local", the proxy will configure the service + in a way that assumes that external load balancers will + take care of balancing the service traffic between nodes, + and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client + source IP. (Traffic mistakenly sent to a node with no + endpoints will be dropped.) The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer + IP from within the cluster will always get "Cluster" semantics, + but clients sending to a NodePort from within the cluster + may need to take traffic policy into account when picking + a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is set + to Local. If a value is specified, is in-range, and is + not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. + load-balancers) can use this port to determine if a given + node holds endpoints for this service or not. If this + field is specified when creating a Service which does + not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing + type). This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes distribute + service traffic they receive on the ClusterIP. 
If set + to "Local", the proxy will assume that pods only want + to talk to endpoints of the service on the same node as + the pod, dropping the traffic if there are no local endpoints. + The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified + by topology and other features). + type: string + ipFamilies: + description: 'IPFamilies is a list of IP families (e.g. + IPv4, IPv6) assigned to this service. This field is usually + assigned automatically based on cluster configuration + and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise + creation of the service will fail. This field is conditionally + mutable: it allows for adding or removing a secondary + IP family, but it does not allow changing the primary + IP family of the Service. Valid values are "IPv4" and + "IPv6". This field only applies to Services of types + ClusterIP, NodePort, and LoadBalancer, and does apply + to "headless" services. This field will be wiped when + updating a Service to type ExternalName. + + + This field may hold a maximum of two entries (dual-stack + families, in either order). These families must correspond + to the values of the clusterIPs field, if specified. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy + field.' + items: + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is no + value provided, then this field will be set to SingleStack. + Services can be "SingleStack" (a single IP family), "PreferDualStack" + (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend on + the value of this field. This field will be wiped when + updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If specified, + the value of this field must be a label-style identifier, + with an optional prefix, e.g. "internal-vip" or "example.com/internal-vip". + Unprefixed names are reserved for end-users. This field + can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is + used, today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any default + load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only + be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped + when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load balancer + is created. This field will be ignored if the cloud-provider + does not support the feature. Deprecated: This field was + under-specified and its meaning varies across implementations. 
+ Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client + IPs. This field will be ignored if the cloud-provider + does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that any + agent which deals with endpoints for this Service should + disregard any indications of ready/not-ready. The primary + use case for setting this field is for a StatefulSet's + Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes + controllers that generate Endpoints and EndpointSlice + resources for Services interpret this to mean that all + endpoints are considered "ready" even if the Pods themselves + are not. Agents which consume only Kubernetes generated + endpoints through the Endpoints or EndpointSlice resources + can safely assume this behavior. + type: boolean + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to maintain + session affinity. Enable client IP based session affinity. + Must be ClientIP or None. Defaults to None. More info: + https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: 'SessionAffinityConfig represents the configurations + of session affinity. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#sessionaffinityconfig-v1-core' + properties: + clientIP: + description: ClientIPConfig represents the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The value + must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + nodePorts: + type: object + description: nodePorts is a list of ports for exposing a + cluster services to the outside world + properties: + pgport: + type: integer + description: the node port that will be exposed to connect + to Postgres instance + replicationport: + type: integer + description: the node port that will be exposed to connect + to Postgres instance for replication purpose + babelfish: + type: integer + description: the node port that will be exposed to connect + to Babelfish instance using SQL Server wire-protocol + and T-SQL + replicas: + type: object + description: 'Configure the service to any replica with the + name as the SGCluster plus the `-replicas` suffix. + + + It provides a stable connection (regardless of replica node + failures) to any read-only Postgres server of the cluster. + Read-only servers are load-balanced via this service. + + + See also https://kubernetes.io/docs/concepts/services-networking/service/ + + ' + properties: + enabled: + type: boolean + description: Specify if the service should be created or + not. + default: true + type: + type: string + enum: + - ClusterIP + - LoadBalancer + - NodePort + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. 
Valid + + options are ClusterIP, NodePort, and LoadBalancer. "ClusterIP" + allocates + + a cluster-internal IP address for load-balancing to endpoints. + + "NodePort" builds on ClusterIP and allocates a port on + every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current + cloud). + + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + default: ClusterIP + customPorts: + type: array + description: "The list of custom ports that will be exposed\ + \ by the service.\n\nThe names of custom ports will be\ + \ prefixed with the string `c-` so they do not\n conflict\ + \ with ports defined for the service.\n\nThe names of\ + \ target ports will be prefixed with the string `c-` so\ + \ that the ports\n that can be referenced in this section\ + \ will be only those defined under\n .spec.pods.customContainers[].ports\ + \ sections were names are also prepended with the same\n\ + \ prefix.\n\n**Changing this field may require a restart.**\n\ + \nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#serviceport-v1-core\n" + items: + description: "A custom port that will be exposed by the\ + \ service.\n\nThe name of the custom port will be prefixed\ + \ with the string `c-` so it does not\n conflict with\ + \ ports defined for the service.\n\nThe name of target\ + \ port will be prefixed with the string `c-` so that\ + \ the port\n that can be referenced in this section\ + \ will be only those defined under\n .spec.pods.customContainers[].ports\ + \ sections were names are also prepended with the same\n\ + \ prefix.\n \nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#serviceport-v1-core\n" + properties: + appProtocol: + description: "The application protocol for this port.\ + \ This is used as a hint for implementations to\ + \ offer richer behavior for protocols that they\ + \ understand. This field follows standard Kubernetes\ + \ label syntax. Valid values are either:\n\n* Un-prefixed\ + \ protocol names - reserved for IANA standard service\ + \ names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\ + \n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c'\ + \ - HTTP/2 prior knowledge over cleartext as described\ + \ in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n\ + \ * 'kubernetes.io/ws' - WebSocket over cleartext\ + \ as described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \ * 'kubernetes.io/wss' - WebSocket over TLS as\ + \ described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \n* Other protocols should use implementation-defined\ + \ prefixed names such as mycompany.com/my-custom-protocol." + type: string + name: + description: The name of this port within the service. + This must be a DNS_LABEL. All ports within a ServiceSpec + must have unique names. When considering the endpoints + for a Service, this must match the 'name' field + in the EndpointPort. Optional if only one ServicePort + is defined on this service. + type: string + nodePort: + description: 'The port on each node on which this + service is exposed when type is NodePort or LoadBalancer. Usually + assigned by the system. If a value is specified, + in-range, and not in use it will be used, otherwise + the operation will fail. If not specified, a port + will be allocated if this Service requires one. 
If + this field is specified when creating a Service + which does not need it, creation will fail. This + field will be wiped when updating a Service to no + longer need it (e.g. changing type from NodePort + to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by this + service. + format: int32 + type: integer + protocol: + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + description: IntOrString is a type that can hold an + int32 or a string. When used in JSON or YAML marshalling + and unmarshalling, it produces or consumes the inner + type. This allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if NodePorts + will be automatically allocated for services with type + LoadBalancer. Default is "true". It may be set to "false" + if the cluster load-balancer does not rely on NodePorts. If + the caller requests specific NodePorts (by specifying + a value), those requests will be respected, regardless + of this field. This field may only be set for services + with type LoadBalancer and will be cleared if the type + is changed to any other type. + type: boolean + externalIPs: + description: 'externalIPs is a list of IP addresses for + which nodes in the cluster will also accept traffic for + this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes system. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#allocateloadbalancernodeports-v1-core' + items: + type: string + type: array + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes distribute + service traffic they receive on one of the Service's "externally-facing" + addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). + If set to "Local", the proxy will configure the service + in a way that assumes that external load balancers will + take care of balancing the service traffic between nodes, + and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client + source IP. (Traffic mistakenly sent to a node with no + endpoints will be dropped.) The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer + IP from within the cluster will always get "Cluster" semantics, + but clients sending to a NodePort from within the cluster + may need to take traffic policy into account when picking + a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is set + to Local. If a value is specified, is in-range, and is + not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. + load-balancers) can use this port to determine if a given + node holds endpoints for this service or not. 
If this + field is specified when creating a Service which does + not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing + type). This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes distribute + service traffic they receive on the ClusterIP. If set + to "Local", the proxy will assume that pods only want + to talk to endpoints of the service on the same node as + the pod, dropping the traffic if there are no local endpoints. + The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified + by topology and other features). + type: string + ipFamilies: + description: 'IPFamilies is a list of IP families (e.g. + IPv4, IPv6) assigned to this service. This field is usually + assigned automatically based on cluster configuration + and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise + creation of the service will fail. This field is conditionally + mutable: it allows for adding or removing a secondary + IP family, but it does not allow changing the primary + IP family of the Service. Valid values are "IPv4" and + "IPv6". This field only applies to Services of types + ClusterIP, NodePort, and LoadBalancer, and does apply + to "headless" services. This field will be wiped when + updating a Service to type ExternalName. + + + This field may hold a maximum of two entries (dual-stack + families, in either order). These families must correspond + to the values of the clusterIPs field, if specified. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy + field.' + items: + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is no + value provided, then this field will be set to SingleStack. + Services can be "SingleStack" (a single IP family), "PreferDualStack" + (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend on + the value of this field. This field will be wiped when + updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If specified, + the value of this field must be a label-style identifier, + with an optional prefix, e.g. "internal-vip" or "example.com/internal-vip". + Unprefixed names are reserved for end-users. This field + can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is + used, today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any default + load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only + be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped + when a service is updated to a non 'LoadBalancer' type. 
+ type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load balancer + is created. This field will be ignored if the cloud-provider + does not support the feature. Deprecated: This field was + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + traffic through the cloud-provider + load-balancer will be restricted to the specified client + IPs. This field will be ignored if the cloud-provider + does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that any + agent which deals with endpoints for this Service should + disregard any indications of ready/not-ready. The primary + use case for setting this field is for a StatefulSet's + Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes + controllers that generate Endpoints and EndpointSlice + resources for Services interpret this to mean that all + endpoints are considered "ready" even if the Pods themselves + are not. Agents which consume only Kubernetes generated + endpoints through the Endpoints or EndpointSlice resources + can safely assume this behavior. + type: boolean + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to maintain + session affinity. Enable client IP based session affinity. + Must be ClientIP or None. Defaults to None. More info: + https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: 'SessionAffinityConfig represents the configurations + of session affinity. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#sessionaffinityconfig-v1-core' + properties: + clientIP: + description: ClientIPConfig represents the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The value + must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + nodePorts: + type: object + description: nodePorts is a list of ports for exposing the + cluster's services to the outside world + properties: + pgport: + type: integer + description: the node port that will be exposed to connect + to the Postgres instance + replicationport: + type: integer + description: the node port that will be exposed to connect + to the Postgres instance for replication purposes + babelfish: + type: integer + description: the node port that will be exposed to connect + to the Babelfish instance using the SQL Server wire-protocol + and T-SQL + pods: + type: object + description: Cluster pod's configuration.
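Taken together, the service-related properties above (session affinity, the pgport/replicationport/babelfish node ports, and so on) are what an SGCluster author fills in to expose the cluster. A minimal, purely illustrative fragment follows; the `spec.postgresServices.primary` path is an assumption based on common StackGres usage and is not shown in this hunk, and all port numbers are placeholders.

```yaml
# Illustrative sketch only. The postgresServices.primary path is assumed
# (it is not part of this diff hunk); port values are placeholders.
apiVersion: stackgres.io/v1
kind: SGCluster
metadata:
  name: stackgres
spec:
  postgresServices:
    primary:
      type: NodePort
      sessionAffinity: ClientIP
      nodePorts:
        pgport: 30432           # node port for regular Postgres connections
        replicationport: 30433  # node port for replication connections
```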
+ required: + - persistentVolume + properties: + persistentVolume: + type: object + description: "Pod's persistent volume configuration.\n\n**Example:**\n\ + \n```yaml\napiVersion: stackgres.io/v1\nkind: SGCluster\n\ + metadata:\n name: stackgres\nspec:\n pods:\n persistentVolume:\n\ + \ size: '5Gi'\n storageClass: default\n```\n" + required: + - size + properties: + size: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$ + description: 'Size of the PersistentVolume set for each + instance of the cluster. This size is specified either + in Mebibytes, Gibibytes or Tebibytes (multiples of 2^20, + 2^30 or 2^40, respectively). + + ' + storageClass: + type: string + description: 'Name of an existing StorageClass in the Kubernetes + cluster, used to create the PersistentVolumes for the + instances of the cluster. + + ' + disableConnectionPooling: + type: boolean + description: 'If set to `true`, avoids creating a connection + pooling (using [PgBouncer](https://www.pgbouncer.org/)) sidecar. + + + **Changing this field may require a restart.** + + ' + default: false + disableMetricsExporter: + type: boolean + description: '**Deprecated** use .spec.configurations.observability.disableMetrics instead. + + ' + disablePostgresUtil: + type: boolean + description: 'If set to `true`, avoids creating the `postgres-util` + sidecar. This sidecar contains usual Postgres administration + utilities *that are not present in the main (`patroni`) container*, + like `psql`. Only disable if you know what you are doing. + + + **Changing this field may require a restart.** + + ' + default: false + disableEnvoy: + type: boolean + description: 'If set to `true`, avoids creating the `envoy` + sidecar. This sidecar is used as the edge proxy for the cluster''s + Pods, providing extra metrics to the monitoring layer. + + + **Changing this field may require a restart.** + + ' + default: false + resources: + type: object + description: Pod custom resources configuration. + properties: + enableClusterLimitsRequirements: + type: boolean + description: 'When enabled, resource limits for containers + other than the patroni container will be set just like + for the patroni container, as specified in the SGInstanceProfile.
+ + **Changing this field may require a restart.** + + ' + default: false + disableResourcesRequestsSplitFromTotal: + type: boolean + description: "When set to `true` the resources requests\ + \ values in fields `SGInstanceProfile.spec.requests.cpu`\ + \ and `SGInstanceProfile.spec.requests.memory` will represent\ + \ the resources\n requests of the patroni container and\ + \ the total resources requests calculated by adding the\ + \ resources requests of all the containers (including\ + \ the patroni container).\n\n**Changing this field may\ + \ require a restart.**\n" + failWhenTotalIsHigher: + type: boolean + description: "When set to `true` the reconciliation of the\ + \ cluster will fail if `disableResourcesRequestsSplitFromTotal`\ + \ is not set or set to `false` and the sum of the CPU\ + \ or memory\n of all the containers except patroni is\ + \ equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\ + \ or `SGInstanceProfile.spec.requests.memory`.\n\nWhen\ + \ `false` (the default) and `disableResourcesRequestsSplitFromTotal`\ + \ is not set or set to `false` and the sum of the CPU\ + \ or memory\n of all the containers except patroni is\ + \ equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu`\ + \ or `SGInstanceProfile.spec.requests.memory`\n then the\ + \ patroni container resources will be set to 0.\n" + scheduling: + type: object + description: 'Pod custom scheduling, affinity and topology spread + constraints configuration. + + + **Changing this field may require a restart.** + + ' + properties: + nodeSelector: + type: object + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the pod to fit on a node. Selector which must match + a node''s labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + ' + tolerations: + description: 'If specified, the pod''s tolerations. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core' + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> + using the matching operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to + match. Empty means match all taint effects. When + specified, allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If + the key is empty, operator must be Exists; this + combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints + of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect + NoExecute, otherwise this field is ignored) tolerates + the taint. By default, it is not set, which means + tolerate the taint forever (do not evict). Zero + and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value + should be empty, otherwise just a regular string.
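The sidecar toggles and resource flags defined above all hang off `spec.pods`, the same path used by the embedded persistentVolume example. A minimal sketch with illustrative values:

```yaml
# Illustrative values only; the field names come from the schema above.
apiVersion: stackgres.io/v1
kind: SGCluster
metadata:
  name: stackgres
spec:
  pods:
    persistentVolume:
      size: '10Gi'
    disableConnectionPooling: false    # keep the PgBouncer sidecar
    disablePostgresUtil: false         # keep psql and other admin utilities
    disableEnvoy: false                # keep the Envoy edge proxy
    resources:
      enableClusterLimitsRequirements: true
      disableResourcesRequestsSplitFromTotal: false
```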
+ type: string + type: object + type: array + nodeAffinity: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a + no-op). A null preferred scheduling term matches + no objects (i.e. is also a no-op). + properties: + preference: + description: A null or empty node selector term + matches no objects. The requirements of them + are ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range + 1-100. 
+ format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the union of + the results of one or more label queries over a set + of nodes; that is, it represents the OR of the selectors + represented by the node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them + are ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. + "system-node-critical" and "system-cluster-critical" are + two special keywords which indicate the highest priorities + with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object + with that name. If not specified, the pod priority will + be default or zero if there is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod affinity + scheduling rules. 
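The `scheduling` block mirrors the standard Kubernetes placement primitives (nodeSelector, tolerations, priorityClassName, nodeAffinity). A minimal sketch, where labels, taints and the priority class name are placeholders:

```yaml
# Illustrative placement sketch; labels, taints and class names are placeholders.
apiVersion: stackgres.io/v1
kind: SGCluster
metadata:
  name: stackgres
spec:
  pods:
    scheduling:
      nodeSelector:
        disktype: ssd
      priorityClassName: high-priority
      tolerations:
        - key: dedicated
          operator: Equal
          value: postgres
          effect: NoSchedule
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: kubernetes.io/arch
                  operator: In
                  values:
                    - amd64
```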
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the + corresponding podAffinityTerm; the node(s) with the + highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. 
Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range + 1-100. 
+ format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a + pod label update), the system may or may not try to + eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all + terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. 
The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter pod + anti affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. 
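The `podAffinity` stanza above follows the upstream Kubernetes type. A minimal sketch of a soft affinity rule; the `app: my-backend` selector is hypothetical:

```yaml
# Illustrative sketch; the app=my-backend label is hypothetical.
apiVersion: stackgres.io/v1
kind: SGCluster
metadata:
  name: stackgres
spec:
  pods:
    scheduling:
      podAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 50
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  app: my-backend
```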
The + node that is most preferred is the one with the greatest + sum of weights, i.e. for each node that meets all + of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the + node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. 
The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range + 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. 
If the anti-affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a + pod label update), the system may or may not try to + eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all + terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
+ Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + topologySpreadConstraints: + description: 'TopologySpreadConstraints describes how a + group of pods ought to spread across topology domains. + Scheduler will schedule pods in a way which abides by + the constraints. All topologySpreadConstraints are ANDed. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#topologyspreadconstraint-v1-core' + items: + description: TopologySpreadConstraint specifies how to + spread matching pods among the given topology. + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. 
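Its counterpart, `podAntiAffinity`, is typically used to keep database pods off the same node. A minimal sketch of a hard anti-affinity rule; the label used in the selector is a placeholder:

```yaml
# Illustrative sketch; the app label value is a placeholder.
apiVersion: stackgres.io/v1
kind: SGCluster
metadata:
  name: stackgres
spec:
  pods:
    scheduling:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchExpressions:
                - key: app
                  operator: In
                  values:
                    - StackGresCluster
```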
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: 'MatchLabelKeys is a set of pod label + keys to select the pods over which spreading will + be calculated. The keys are used to lookup values + from the incoming pod labels, those key-value labels + are ANDed with labelSelector to select the group + of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden + to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector + isn''t set. Keys that don''t exist in the incoming + pod labels will be ignored. A null or empty list + means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default).' + items: + type: string + type: array + maxSkew: + description: 'MaxSkew describes the degree to which + pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the + number of matching pods in the target topology and + the global minimum. The global minimum is the minimum + number of matching pods in an eligible domain or + zero if the number of eligible domains is less than + MinDomains. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum + is 1. | zone1 | zone2 | zone3 | | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled + to zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can + be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies + that satisfy it. It''s a required field. Default + value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: 'MinDomains indicates a minimum number + of eligible domains. When the number of eligible + domains with matching topology keys is less than + minDomains, Pod Topology Spread treats "global minimum" + as 0, and then the calculation of Skew is performed. 
+ And when the number of eligible domains with matching + topology keys equals or greater than minDomains, + this value has no effect on scheduling. As a result, + when the number of eligible domains is less than + minDomains, scheduler won''t schedule more than + maxSkew Pods to those domains. If value is nil, + the constraint behaves as if MinDomains is equal + to 1. Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be + DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set + to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: | zone1 | zone2 | + zone3 | | P P | P P | P P | The number of + domains is less than 5(MinDomains), so "global minimum" + is treated as 0. In this situation, new pod with + the same labelSelector cannot be scheduled, because + computed skew will be 3(3 - 0) if new Pod is scheduled + to any of the three zones, it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread + feature gate to be enabled (enabled by default).' + format: int32 + type: integer + nodeAffinityPolicy: + description: 'NodeAffinityPolicy indicates how we + will treat Pod''s nodeAffinity/nodeSelector when + calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent + to the Honor policy. This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + type: string + nodeTaintsPolicy: + description: 'NodeTaintsPolicy indicates how we will + treat node taints when calculating pod topology + spread skew. Options are: - Honor: nodes without + taints, along with tainted nodes for which the incoming + pod has a toleration, are included. - Ignore: node + taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent + to the Ignore policy. This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + type: string + topologyKey: + description: TopologyKey is the key of node labels. + Nodes that have a label with this key and identical + values are considered to be in the same topology. + We consider each as a "bucket", and + try to put balanced number of pods into each bucket. + We define a domain as a particular instance of a + topology. Also, we define an eligible domain as + a domain whose nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if + TopologyKey is "topology.kubernetes.io/zone", each + zone is a domain of that topology. It's a required + field. + type: string + whenUnsatisfiable: + description: "WhenUnsatisfiable indicates how to deal\ + \ with a pod if it doesn't satisfy the spread constraint.\ + \ - DoNotSchedule (default) tells the scheduler\ + \ not to schedule it. - ScheduleAnyway tells the\ + \ scheduler to schedule the pod in any location,\n\ + \ but giving higher precedence to topologies that\ + \ would help reduce the\n skew.\nA constraint is\ + \ considered \"Unsatisfiable\" for an incoming pod\ + \ if and only if every possible node assignment\ + \ for that pod would violate \"MaxSkew\" on some\ + \ topology. 
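For `topologySpreadConstraints`, the required trio is maxSkew, topologyKey and whenUnsatisfiable. A minimal sketch that spreads the cluster's pods across zones on a best-effort basis; the label selector is a placeholder:

```yaml
# Illustrative sketch; the label selector is a placeholder.
apiVersion: stackgres.io/v1
kind: SGCluster
metadata:
  name: stackgres
spec:
  pods:
    scheduling:
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: ScheduleAnyway
          labelSelector:
            matchLabels:
              app: StackGresCluster
```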
For example, in a 3-zone cluster, MaxSkew\ + \ is set to 1, and pods with the same labelSelector\ + \ spread as 3/1/1: | zone1 | zone2 | zone3 | | P\ + \ P P | P | P | If WhenUnsatisfiable is\ + \ set to DoNotSchedule, incoming pod can only be\ + \ scheduled to zone2(zone3) to become 3/2/1(3/1/2)\ + \ as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1).\ + \ In other words, the cluster can still be imbalanced,\ + \ but scheduler won't make it *more* imbalanced.\ + \ It's a required field." + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + backup: + type: object + description: Backup Pod custom scheduling and affinity configuration. + properties: + nodeSelector: + type: object + additionalProperties: + type: string + description: 'NodeSelector is a selector which must + be true for the pod to fit on a node. Selector which + must match a node''s labels for the pod to be scheduled + on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + ' + tolerations: + description: 'If specified, the pod''s tolerations. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core' + items: + description: The pod this Toleration is attached to + tolerates any taint that matches the triple + using the matching operator . + properties: + effect: + description: Effect indicates the taint effect + to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; + this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and + Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the + period of time the toleration (which must be + of effect NoExecute, otherwise this field is + ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever + (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value + should be empty, otherwise just a regular string. + type: string + type: object + type: array + nodeAffinity: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the one with + the greatest sum of weights, i.e. 
for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node matches the corresponding + matchExpressions; the node(s) with the highest + sum are the most preferred. + items: + description: An empty preferred scheduling term + matches all objects with implicit weight 0 (i.e. + it's a no-op). A null preferred scheduling term + matches no objects (i.e. is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the union + of the results of one or more label queries over + a set of nodes; that is, it represents the OR + of the selectors represented by the node selector + terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. 
The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. + "system-node-critical" and "system-cluster-critical" + are two special keywords which indicate the highest + priorities with the former being the highest priority. + Any other name must be defined by creating a PriorityClass + object with that name. If not specified, the pod priority + will be default or zero if there is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
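# Illustrative sketch (assumption, not part of the generated schema): the backup
# scheduling fields described above could be combined roughly as follows. Only the
# field names shown in this schema (nodeSelector, tolerations, nodeAffinity,
# priorityClassName) are taken from the descriptions; every value, and the exact
# placement of the `backup` section within the spec, is a placeholder.
#
#   backup:
#     nodeSelector:
#       disktype: ssd
#     tolerations:
#       - key: dedicated
#         operator: Equal
#         value: backups
#         effect: NoSchedule
#     nodeAffinity:
#       requiredDuringSchedulingIgnoredDuringExecution:
#         nodeSelectorTerms:
#           - matchExpressions:
#               - key: kubernetes.io/arch
#                 operator: In
#                 values:
#                   - amd64
#     priorityClassName: low-priority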
+ items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. 
Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. If + the affinity requirements specified by this field + cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may + or may not try to eventually evict the pod from + its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. 
+ items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter + pod anti affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. 
Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. If + the anti-affinity requirements specified by this + field cease to be met at some point during pod + execution (e.g. due to a pod label update), the + system may or may not try to eventually evict + the pod from its node. When there are multiple + elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all + terms must be satisfied. 
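# Illustrative sketch (assumption, not part of the generated schema): a
# podAntiAffinity block matching the structure above, requiring that pods carrying
# an assumed label never share a node and preferring to spread them across zones.
# The label key/value and topology keys are placeholders.
#
#   podAntiAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       - labelSelector:
#           matchLabels:
#             app: backup-job
#         topologyKey: kubernetes.io/hostname
#     preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 50
#         podAffinityTerm:
#           labelSelector:
#             matchLabels:
#               app: backup-job
#           topologyKey: topology.kubernetes.io/zone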
+ items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. 
+ items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + managementPolicy: + type: string + description: "managementPolicy controls how pods are created\ + \ during initial scale up, when replacing pods\n on nodes,\ + \ or when scaling down. The default policy is `OrderedReady`,\ + \ where pods are created\n in increasing order (pod-0, then\ + \ pod-1, etc) and the controller will wait until each pod\ + \ is\n ready before continuing. 
When scaling down, the pods\ + \ are removed in the opposite order.\n The alternative policy\ + \ is `Parallel` which will create pods in parallel to match\ + \ the desired\n scale without waiting, and on scale down\ + \ will delete all pods at once.\n" + default: OrderedReady + customVolumes: + type: array + description: "A list of custom volumes that may be used along\ + \ with any container defined in\n customInitContainers or\ + \ customContainers sections.\n\nThe name used in this section\ + \ will be prefixed with the string `c-` so that when\n referencing\ + \ them in the customInitContainers or customContainers sections\ + \ the name used\n have to be prepended with the same prefix.\n\ + \nOnly the following volume types are allowed: configMap,\ + \ downwardAPI, emptyDir,\n gitRepo, glusterfs, hostPath,\ + \ nfs, projected and secret\n\n**Changing this field may require\ + \ a restart.**\n \nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core\n" + items: + type: object + description: "A custom volume that may be used along with\ + \ any container defined in\n customInitContainers or customContainers\ + \ sections.\n\nThe name used in this section will be prefixed\ + \ with the string `c-` so that when\n referencing them in\ + \ the customInitContainers or customContainers sections\ + \ the name used\n have to be prepended with the same prefix.\n\ + \nOnly the following volume types are allowed: configMap,\ + \ downwardAPI, emptyDir,\n gitRepo, glusterfs, hostPath,\ + \ nfs, projected and secret\n\n**Changing this field may\ + \ require a restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core\n" + required: + - name + properties: + name: + description: 'name of the custom volume. The name will + be implicitly prefixed with `c-` to avoid clashing with + internal operator volume names. Must be a DNS_LABEL + and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + ' + type: string + configMap: + description: 'Adapts a ConfigMap into a volume. + + + The contents of the target ConfigMap''s Data field will + be presented in a volume as files using the keys in + the Data field as the file names, unless the items element + is populated with specific mappings of keys to paths. + ConfigMap volumes support ownership management and SELinux + relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#configmapvolumesource-v1-core' + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used + to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. 
If + a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked + optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file. Must be an + octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both + octal and decimal values, JSON requires decimal + values for mode bits. If not specified, the + volume defaultMode will be used. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the + file to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: 'DownwardAPIVolumeSource represents a volume + containing downward API info. Downward API volumes support + ownership management and SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#downwardapivolumesource-v1-core' + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default. Must be a Optional: mode bits + used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: ObjectFieldSelector selects an + APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set + permissions on this file, must be an octal + value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal + and decimal values, JSON requires decimal + values for mode bits. If not specified, the + volume defaultMode will be used. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. 
The first item of the + relative path must not start with ''..''' + type: string + resourceFieldRef: + description: ResourceFieldSelector represents + container resources (cpu, memory) and their + output format + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling in\ + \ JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\n\t(Note\ + \ that may be empty, from the\ + \ \"\" case in .)\n\n\ + \ ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= | \ + \ | ::=\ + \ Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k |\ + \ M | G | T | P | E\n\n\t(Note that 1024\ + \ = 1Ki but 1000 = 1k; I didn't choose\ + \ the capitalization.)\n\n\ + \ ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three\ + \ exponent forms is used, no quantity\ + \ may represent a number greater than\ + \ 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers\ + \ larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in the\ + \ future if we require larger or smaller\ + \ quantities.\n\nWhen a Quantity is parsed\ + \ from a string, it will remember the\ + \ type of suffix it had, and will use\ + \ the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be\ + \ put in \"canonical form\". This means\ + \ that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\ + \n- No precision is lost - No fractional\ + \ digits will be emitted - The exponent\ + \ (or suffix) is as large as possible.\n\ + \nThe sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\n\ + - 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER\ + \ be internally represented by a floating\ + \ point number. That is the whole point\ + \ of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are\ + \ well formed, but will be re-emitted\ + \ in their canonical form. (So always\ + \ use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code in\ + \ the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'Represents an empty directory for a pod. + Empty directory volumes support ownership management + and SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#emptydirvolumesource-v1-core' + properties: + medium: + description: 'medium represents what type of storage + medium should back this directory. The default is + "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. 
More + info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String() and\ + \ AsInt64() accessors.\n\nThe serialization format\ + \ is:\n\n``` ::= \n\ + \n\t(Note that may be empty, from the \"\ + \" case in .)\n\n ::=\ + \ 0 | 1 | ... | 9 ::= \ + \ | ::= \ + \ | . | . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= | | \ + \ ::= Ki | Mi | Gi | Ti | Pi |\ + \ Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T\ + \ | P | E\n\n\t(Note that 1024 = 1Ki but 1000 =\ + \ 1k; I didn't choose the capitalization.)\n\n\ + \ ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three exponent forms\ + \ is used, no quantity may represent a number greater\ + \ than 2^63-1 in magnitude, nor may it have more\ + \ than 3 decimal places. Numbers larger or more\ + \ precise will be capped or rounded up. (E.g.: 0.1m\ + \ will rounded up to 1m.) This may be extended in\ + \ the future if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it will\ + \ remember the type of suffix it had, and will use\ + \ the same type again when it is serialized.\n\n\ + Before serializing, Quantity will be put in \"canonical\ + \ form\". This means that Exponent/suffix will be\ + \ adjusted up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\n- No precision\ + \ is lost - No fractional digits will be emitted\ + \ - The exponent (or suffix) is as large as possible.\n\ + \nThe sign will be omitted unless the number is\ + \ negative.\n\nExamples:\n\n- 1.5 will be serialized\ + \ as \"1500m\" - 1.5Gi will be serialized as \"\ + 1536Mi\"\n\nNote that the quantity will NEVER be\ + \ internally represented by a floating point number.\ + \ That is the whole point of this exercise.\n\n\ + Non-canonical values will still parse as long as\ + \ they are well formed, but will be re-emitted in\ + \ their canonical form. (So always use canonical\ + \ form, or don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these numbers without\ + \ writing some sort of special handling code in\ + \ the hopes that that will cause implementors to\ + \ also use a fixed point implementation." + type: string + type: object + gitRepo: + description: 'Represents a volume that is populated with + the contents of a git repository. Git repo volumes do + not support ownership management. Git repo volumes support + SELinux relabeling. + + + DEPRECATED: GitRepo is deprecated. To provision a container + with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir + into the Pod''s container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#gitrepovolumesource-v1-core' + properties: + directory: + description: directory is the target directory name. + Must not contain or start with '..'. If '.' is + supplied, the volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. 
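# Illustrative sketch (assumption, not part of the generated schema): two entries
# for the customVolumes list described above, using volume types this schema
# allows (configMap and emptyDir). Per the description, each name is implicitly
# prefixed with `c-`, so containers would reference these volumes as
# `c-extra-config` and `c-scratch`. All names and sizes are placeholders.
#
#   customVolumes:
#     - name: extra-config
#       configMap:
#         name: my-extra-config
#         optional: true
#     - name: scratch
#       emptyDir:
#         medium: Memory
#         sizeLimit: 256Mi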
+ type: string + required: + - repository + type: object + glusterfs: + description: 'Represents a Glusterfs mount that lasts + the lifetime of a pod. Glusterfs volumes do not support + ownership management or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#glusterfsvolumesource-v1-core' + properties: + endpoints: + description: 'endpoints is the endpoint name that + details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More + info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs + volume to be mounted with read-only permissions. + Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'Represents a host path mapped into a pod. + Host path volumes do not support ownership management + or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#hostpathvolumesource-v1-core' + properties: + path: + description: 'path of the directory on the host. If + the path is a symlink, it will follow the link to + the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to + "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + nfs: + description: 'Represents an NFS mount that lasts the lifetime + of a pod. NFS volumes do not support ownership management + or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nfsvolumesource-v1-core' + properties: + path: + description: 'path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export + to be mounted with read-only permissions. Defaults + to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address + of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - server + - path + type: object + projected: + description: 'Represents a projected volume source + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#projectedvolumesource-v1-core' + properties: + defaultMode: + description: defaultMode are the mode bits used to + set permissions on created files by default. Must + be an octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both octal + and decimal values, JSON requires decimal values + for mode bits. Directories within the path are not + affected by this setting. This might be in conflict + with other options that affect the file mode, like + fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundleProjection describes + how to select a set of ClusterTrustBundle + objects and project their contents into the + pod filesystem. + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions are + ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. Mutually-exclusive with + signerName and labelSelector. + type: string + optional: + description: If true, don't block pod startup + if the referenced ClusterTrustBundle(s) + aren't available. If using name, then + the named ClusterTrustBundle is allowed + not to exist. If using signerName, then + the combination of signerName and labelSelector + is allowed to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles + that match this signer name. Mutually-exclusive + with name. The contents of all selected + ClusterTrustBundles will be unified and + deduplicated. + type: string + required: + - path + type: object + configMap: + description: 'Adapts a ConfigMap into a projected + volume. + + + The contents of the target ConfigMap''s Data + field will be presented in a projected volume + as files using the keys in the Data field + as the file names, unless the items element + is populated with specific mappings of keys + to paths. Note that this is identical to a + configmap volume source without the default + mode.' + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of the + referenced ConfigMap will be projected + into the volume as a file whose name is + the key and content is the value. 
If specified, + the listed keys will be projected into + the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the ConfigMap, + the volume setup will error unless it + is marked optional. Paths must be relative + and may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on + this file. Must be an octal value + between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts + both octal and decimal values, JSON + requires decimal values for mode + bits. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options + that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: Represents downward API info for + projecting into a projected volume. Note that + this is identical to a downwardAPI volume + source without the default mode. + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in + terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified API + version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits + used to set permissions on this + file, must be an octal value between + 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts + both octal and decimal values, JSON + requires decimal values for mode + bits. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options + that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. 
The first item + of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: ResourceFieldSelector + represents container resources (cpu, + memory) and their output format + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number.\ + \ It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition\ + \ to String() and AsInt64()\ + \ accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\ + \n\t(Note that may\ + \ be empty, from the \"\" case\ + \ in .)\n\n\ + \ ::= 0 | 1 | ...\ + \ | 9 ::=\ + \ | \ + \ ::= \ + \ | . | .\ + \ | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki |\ + \ Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units;\ + \ See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m |\ + \ \"\" | k | M | G | T | P |\ + \ E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose\ + \ the capitalization.)\n\n\ + \ ::= \"e\" |\ + \ \"E\" ```\n\ + \nNo matter which of the three\ + \ exponent forms is used, no\ + \ quantity may represent a number\ + \ greater than 2^63-1 in magnitude,\ + \ nor may it have more than\ + \ 3 decimal places. Numbers\ + \ larger or more precise will\ + \ be capped or rounded up. (E.g.:\ + \ 0.1m will rounded up to 1m.)\ + \ This may be extended in the\ + \ future if we require larger\ + \ or smaller quantities.\n\n\ + When a Quantity is parsed from\ + \ a string, it will remember\ + \ the type of suffix it had,\ + \ and will use the same type\ + \ again when it is serialized.\n\ + \nBefore serializing, Quantity\ + \ will be put in \"canonical\ + \ form\". This means that Exponent/suffix\ + \ will be adjusted up or down\ + \ (with a corresponding increase\ + \ or decrease in Mantissa) such\ + \ that:\n\n- No precision is\ + \ lost - No fractional digits\ + \ will be emitted - The exponent\ + \ (or suffix) is as large as\ + \ possible.\n\nThe sign will\ + \ be omitted unless the number\ + \ is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be\ + \ serialized as \"1536Mi\"\n\ + \nNote that the quantity will\ + \ NEVER be internally represented\ + \ by a floating point number.\ + \ That is the whole point of\ + \ this exercise.\n\nNon-canonical\ + \ values will still parse as\ + \ long as they are well formed,\ + \ but will be re-emitted in\ + \ their canonical form. (So\ + \ always use canonical form,\ + \ or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without\ + \ writing some sort of special\ + \ handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point\ + \ implementation." + type: string + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: 'Adapts a secret into a projected + volume. + + + The contents of the target Secret''s Data + field will be presented in a projected volume + as files using the keys in the Data field + as the file names. Note that this is identical + to a secret volume source without the default + mode.' 
+ properties: + items: + description: items if unspecified, each + key-value pair in the Data field of the + referenced Secret will be projected into + the volume as a file whose name is the + key and content is the value. If specified, + the listed keys will be projected into + the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the Secret, the + volume setup will error unless it is marked + optional. Paths must be relative and may + not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on + this file. Must be an octal value + between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts + both octal and decimal values, JSON + requires decimal values for mode + bits. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options + that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: ServiceAccountTokenProjection represents + a projected service account token volume. + This projection can be used to insert a service + account token into the pods runtime filesystem + for use against APIs (Kubernetes API Server + or otherwise). + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must + identify itself with an identifier specified + in the audience of the token, and otherwise + should reject the token. The audience + defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, + the kubelet volume plugin will proactively + rotate the service account token. The + kubelet will start trying to rotate the + token if the token is older than 80 percent + of its time to live or if the token is + older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to + the mount point of the file to project + the token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + secret: + description: 'Adapts a Secret into a volume. + + + The contents of the target Secret''s Data field will + be presented in a volume as files using the keys in + the Data field as the file names. Secret volumes support + ownership management and SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretvolumesource-v1-core' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used + to set permissions on created files by default. 
+ Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. If + a key is specified which is not present in the Secret, + the volume setup will error unless it is marked + optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file. Must be an + octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both + octal and decimal values, JSON requires decimal + values for mode bits. If not specified, the + volume defaultMode will be used. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the + file to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret + in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource references + the user''s PVC in the same namespace. This volume finds + the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource + is, essentially, a wrapper around another type of volume + that is owned by someone else (the system). + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#persistentvolumeclaimvolumesource-v1-core' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting + in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + customInitContainers: + type: array + description: "A list of custom application init containers that\ + \ run within the cluster's Pods. 
The\n custom init containers\ + \ will run following the defined sequence as the end of\n\ + \ cluster's Pods init containers.\n\nThe name used in this\ + \ section will be prefixed with the string `c-` so that when\n\ + \ referencing them in the .spec.containers section of SGInstanceProfile\ + \ the name used\n have to be prepended with the same prefix.\n\ + \n**Changing this field may require a restart.**\n \nSee:\ + \ https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\n" + items: + type: object + description: "A custom application init container that run\ + \ within the cluster's Pods. The custom init\n containers\ + \ will run following the defined sequence as the end of\ + \ cluster's Pods init\n containers.\n\nThe name used in\ + \ this section will be prefixed with the string `c-` so\ + \ that when\n referencing them in the .spec.containers section\ + \ of SGInstanceProfile the name used\n have to be prepended\ + \ with the same prefix.\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\\\ + n\n\n**Changing this field may require a restart.**\n" + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are + reduced to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce + the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a + shell. The container image''s ENTRYPOINT is used if + this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If + a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the + container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are + expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, the + reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' 
+ type: string + valueFrom: + description: EnvVarSource represents a source for + the value of an EnvVar. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: ObjectFieldSelector selects an + APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: ResourceFieldSelector represents + container resources (cpu, memory) and their + output format + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling in\ + \ JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\n\t(Note\ + \ that may be empty, from the\ + \ \"\" case in .)\n\n\ + \ ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= | \ + \ | ::=\ + \ Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k |\ + \ M | G | T | P | E\n\n\t(Note that 1024\ + \ = 1Ki but 1000 = 1k; I didn't choose\ + \ the capitalization.)\n\n\ + \ ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three\ + \ exponent forms is used, no quantity\ + \ may represent a number greater than\ + \ 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers\ + \ larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in the\ + \ future if we require larger or smaller\ + \ quantities.\n\nWhen a Quantity is parsed\ + \ from a string, it will remember the\ + \ type of suffix it had, and will use\ + \ the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be\ + \ put in \"canonical form\". This means\ + \ that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\ + \n- No precision is lost - No fractional\ + \ digits will be emitted - The exponent\ + \ (or suffix) is as large as possible.\n\ + \nThe sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\n\ + - 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER\ + \ be internally represented by a floating\ + \ point number. That is the whole point\ + \ of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are\ + \ well formed, but will be re-emitted\ + \ in their canonical form. 
(So always\ + \ use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code in\ + \ the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: SecretKeySelector selects a key + of a Secret. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must + be a C_IDENTIFIER. All invalid keys will be reported + as an event when the container is starting. When a key + exists in multiple sources, the value associated with + the last source will take precedence. Values defined + by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: 'ConfigMapEnvSource selects a ConfigMap + to populate the environment variables with. + + + The contents of the target ConfigMap''s Data field + will represent the key-value pairs as environment + variables.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: 'SecretEnvSource selects a Secret to + populate the environment variables with. + + + The contents of the target Secret''s Data field + will represent the key-value pairs as environment + variables.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images in + workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag is specified, + or IfNotPresent otherwise. Cannot be updated. More info: + https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle describes actions that the management + system should take in response to container lifecycle + events. 
For the PostStart and PreStop lifecycle handlers, + management of the container blocks until the action + is complete, unless the container process fails, in + which case the handler is aborted. + properties: + postStart: + description: LifecycleHandler defines a specific action + that should be taken in a lifecycle hook. One and + only one of the fields, except TCPSocket must be + specified. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" action. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: LifecycleHandler defines a specific action + that should be taken in a lifecycle hook. One and + only one of the fields, except TCPSocket must be + specified. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. 
The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" action. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probe describes a health check to be performed + against a container to determine whether it is alive + or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory + for the command is root ('/') in the container's + filesystem. The command is simply exec'd, it + is not run inside a shell, so traditional shell + instructions ('|', etc) won't work. To use a + shell, you need to explicitly call out to that + shell. Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action based + on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. + Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action based + on opening a socket + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the + value provided by the pod spec. Value must be non-negative + integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). 
+ This is a beta field and requires enabling ProbeTerminationGracePeriod + feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe + times out. Defaults to 1 second. Minimum value is + 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. + Not specifying a port here DOES NOT prevent that port + from being exposed. Any port which is listening on the + default "0.0.0.0" address inside a container will be + accessible from the network. Modifying this array with + strategic merge patch may corrupt the data. For more + information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's + IP address. This must be a valid port number, + 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: Number of port to expose on the host. + If specified, this must be a valid port number, + 0 < x < 65536. If HostNetwork is specified, this + must match ContainerPort. Most containers do not + need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in + a pod must have a unique name. Name for the port + that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, + or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: Probe describes a health check to be performed + against a container to determine whether it is alive + or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory + for the command is root ('/') in the container's + filesystem. The command is simply exec'd, it + is not run inside a shell, so traditional shell + instructions ('|', etc) won't work. To use a + shell, you need to explicitly call out to that + shell. Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action based + on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. + Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action based + on opening a socket + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the + value provided by the pod spec. Value must be non-negative + integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod + feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe + times out. Defaults to 1 second. 
Minimum value is + 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this + resource resize policy applies. Supported values: + cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it defaults + to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: 'Claims lists the names of resources, + defined in spec.resourceClaims, that are used by + this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for + containers.' + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the Pod + where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String() and\ + \ AsInt64() accessors.\n\nThe serialization format\ + \ is:\n\n``` ::= \n\ + \n\t(Note that may be empty, from the\ + \ \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::=\ + \ | \ + \ ::= | . | .\ + \ | . ::= \"+\" | \"\ + -\" ::= | \ + \ ::= | \ + \ | ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International System\ + \ of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G |\ + \ T | P | E\n\n\t(Note that 1024 = 1Ki but 1000\ + \ = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" |\ + \ \"E\" ```\n\nNo matter which\ + \ of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in\ + \ magnitude, nor may it have more than 3 decimal\ + \ places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in the future\ + \ if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had, and\ + \ will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in\ + \ \"canonical form\". This means that Exponent/suffix\ + \ will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\ + \n- No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix) is\ + \ as large as possible.\n\nThe sign will be omitted\ + \ unless the number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\" - 1.5Gi\ + \ will be serialized as \"1536Mi\"\n\nNote that\ + \ the quantity will NEVER be internally represented\ + \ by a floating point number. 
That is the whole\ + \ point of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical form.\ + \ (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult\ + \ to use these numbers without writing some sort\ + \ of special handling code in the hopes that that\ + \ will cause implementors to also use a fixed\ + \ point implementation." + type: string + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String() and\ + \ AsInt64() accessors.\n\nThe serialization format\ + \ is:\n\n``` ::= \n\ + \n\t(Note that may be empty, from the\ + \ \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::=\ + \ | \ + \ ::= | . | .\ + \ | . ::= \"+\" | \"\ + -\" ::= | \ + \ ::= | \ + \ | ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International System\ + \ of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G |\ + \ T | P | E\n\n\t(Note that 1024 = 1Ki but 1000\ + \ = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" |\ + \ \"E\" ```\n\nNo matter which\ + \ of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in\ + \ magnitude, nor may it have more than 3 decimal\ + \ places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in the future\ + \ if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had, and\ + \ will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in\ + \ \"canonical form\". This means that Exponent/suffix\ + \ will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\ + \n- No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix) is\ + \ as large as possible.\n\nThe sign will be omitted\ + \ unless the number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\" - 1.5Gi\ + \ will be serialized as \"1536Mi\"\n\nNote that\ + \ the quantity will NEVER be internally represented\ + \ by a floating point number. That is the whole\ + \ point of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical form.\ + \ (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult\ + \ to use these numbers without writing some sort\ + \ of special handling code in the hopes that that\ + \ will cause implementors to also use a fixed\ + \ point implementation." + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. Requests cannot exceed Limits. 
More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may only + be set for init containers, and the only allowed value + is "Always". For non-init containers or when this field + is not specified, the restart behavior is defined by + the Pod''s restart policy and the container type. Setting + the RestartPolicy as "Always" for the init container + will have the following effect: this init container + will be continually restarted on exit until all regular + containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy + "Always" will be shut down. This lifecycle differs from + normal init containers and is often referred to as a + "sidecar" container. Although this init container still + starts in the init container sequence, it does not wait + for the container to complete before proceeding to the + next init container. Instead, the next init container + starts immediately after this init container is started, + or after any startupProbe has successfully completed.' + type: string + securityContext: + description: SecurityContext holds security configuration + that will be applied to a container. Some fields are + present in both SecurityContext and PodSecurityContext. When + both are set, the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent + process. This bool directly controls if the no_new_privs + flag will be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as + Privileged 2) has CAP_SYS_ADMIN Note that this field + cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: Adds and removes POSIX capabilities from + running containers. + properties: + add: + description: Added capabilities + items: + type: string + type: array + drop: + description: Removed capabilities + items: + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. Note that + this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount + to use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field + cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that this + field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. If set in + both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. 
If true, the Kubelet will validate + the image at runtime to ensure that it does not + run as UID 0 (root) and fail to start the container + if it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified in + image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be + set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: SELinuxOptions are the labels to be applied + to the container + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: SeccompProfile defines a pod/container's + seccomp profile settings. Only one profile source + may be set. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: 'type indicates which kind of seccomp + profile will be applied. Valid options are: + + + Localhost - a profile defined in a file on the + node should be used. RuntimeDefault - the container + runtime default profile should be used. Unconfined + - no profile should be applied.' + type: string + required: + - type + type: object + windowsOptions: + description: WindowsSecurityContextOptions contain + Windows-specific options and credentials. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA + admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + All of a Pod's containers must have the same + effective HostProcess value (it is not allowed + to have a mix of HostProcess containers and + non-HostProcess containers). In addition, if + HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the + entrypoint of the container process. Defaults + to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. 
+ type: string + type: object + type: object + startupProbe: + description: Probe describes a health check to be performed + against a container to determine whether it is alive + or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory + for the command is root ('/') in the container's + filesystem. The command is simply exec'd, it + is not run inside a shell, so traditional shell + instructions ('|', etc) won't work. To use a + shell, you need to explicitly call out to that + shell. Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action based + on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. + Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action based + on opening a socket + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the + value provided by the pod spec. Value must be non-negative + integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod + feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe + times out. Defaults to 1 second. Minimum value is + 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a + buffer for stdin in the container runtime. If this is + not set, reads from stdin in the container will always + result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close + the stdin channel after it has been opened by a single + attach. When stdin is true the stdin stream will remain + open across multiple attach sessions. If stdinOnce is + set to true, stdin is opened on container start, is + empty until the first client attaches to stdin, and + then remains open and accepts data until the client + disconnects, at which time stdin is closed and remains + closed until the container is restarted. If this flag + is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which + the container''s termination message will be written + is mounted into the container''s filesystem. Message + written is intended to be brief final status, such as + an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length + across all containers will be limited to 12kb. Defaults + to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should + be populated. File will use the contents of terminationMessagePath + to populate the container status message on both success + and failure. FallbackToLogsOnError will use the last + chunk of container log output if the termination message + file is empty and the container exited with an error. 
+ The log output is limited to 2048 bytes or 80 lines, + whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a + TTY for itself, also requires 'stdin' to be true. Default + is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - name + - devicePath + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the + container's environment. Defaults to "" (volume's + root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which + might be configured in the container image. Cannot be + updated. + type: string + customContainers: + type: array + description: "A list of custom application containers that run\ + \ within the cluster's Pods.\n\nThe name used in this section\ + \ will be prefixed with the string `c-` so that when\n referencing\ + \ them in the .spec.containers section of SGInstanceProfile\ + \ the name used\n have to be prepended with the same prefix.\n\ + \n**Changing this field may require a restart.**\n \nSee:\ + \ https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\n" + items: + type: object + description: "A custom application container that run within\ + \ the cluster's Pods. 
The custom\n containers will run following\ + \ the defined sequence as the end of cluster's Pods\n containers.\n\ + \nThe name used in this section will be prefixed with the\ + \ string `c-` so that when\n referencing them in the .spec.containers\ + \ section of SGInstanceProfile the name used\n have to be\ + \ prepended with the same prefix.\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\\\ + n\n\n**Changing this field may require a restart.**\n" + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are + reduced to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce + the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a + shell. The container image''s ENTRYPOINT is used if + this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If + a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the + container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are + expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, the + reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' + type: string + valueFrom: + description: EnvVarSource represents a source for + the value of an EnvVar. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: ObjectFieldSelector selects an + APIVersioned field of an object. 
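The `customContainers` schema above accepts ordinary Kubernetes container definitions; per the description, the operator prefixes each name with `c-`, and that prefixed name is what an SGInstanceProfile's `.spec.containers` entry must use. A minimal illustrative fragment follows; the container name, image, and the exact location of this field inside the SGCluster spec are assumptions, not taken from this excerpt:

``` yaml
customContainers:
  - name: metrics-sidecar                      # exposed by the operator as "c-metrics-sidecar"
    image: example.org/metrics-sidecar:1.0     # hypothetical image
    command: ["/bin/sidecar"]
    args: ["--listen", ":9187"]
    env:
      - name: LOG_LEVEL
        value: info
```

When sizing such a container in an SGInstanceProfile, the corresponding `.spec.containers` entry would use the prefixed name `c-metrics-sidecar`, as the description above indicates.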
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: ResourceFieldSelector represents + container resources (cpu, memory) and their + output format + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling in\ + \ JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\n\t(Note\ + \ that may be empty, from the\ + \ \"\" case in .)\n\n\ + \ ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= | \ + \ | ::=\ + \ Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k |\ + \ M | G | T | P | E\n\n\t(Note that 1024\ + \ = 1Ki but 1000 = 1k; I didn't choose\ + \ the capitalization.)\n\n\ + \ ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three\ + \ exponent forms is used, no quantity\ + \ may represent a number greater than\ + \ 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers\ + \ larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in the\ + \ future if we require larger or smaller\ + \ quantities.\n\nWhen a Quantity is parsed\ + \ from a string, it will remember the\ + \ type of suffix it had, and will use\ + \ the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be\ + \ put in \"canonical form\". This means\ + \ that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\ + \n- No precision is lost - No fractional\ + \ digits will be emitted - The exponent\ + \ (or suffix) is as large as possible.\n\ + \nThe sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\n\ + - 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER\ + \ be internally represented by a floating\ + \ point number. That is the whole point\ + \ of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are\ + \ well formed, but will be re-emitted\ + \ in their canonical form. (So always\ + \ use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code in\ + \ the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: SecretKeySelector selects a key + of a Secret. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must + be a C_IDENTIFIER. All invalid keys will be reported + as an event when the container is starting. When a key + exists in multiple sources, the value associated with + the last source will take precedence. Values defined + by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: 'ConfigMapEnvSource selects a ConfigMap + to populate the environment variables with. + + + The contents of the target ConfigMap''s Data field + will represent the key-value pairs as environment + variables.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: 'SecretEnvSource selects a Secret to + populate the environment variables with. + + + The contents of the target Secret''s Data field + will represent the key-value pairs as environment + variables.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images in + workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag is specified, + or IfNotPresent otherwise. Cannot be updated. More info: + https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle describes actions that the management + system should take in response to container lifecycle + events. For the PostStart and PreStop lifecycle handlers, + management of the container blocks until the action + is complete, unless the container process fails, in + which case the handler is aborted. + properties: + postStart: + description: LifecycleHandler defines a specific action + that should be taken in a lifecycle hook. One and + only one of the fields, except TCPSocket must be + specified. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. 
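The `env` and `envFrom` schemas above follow the core Kubernetes conventions (`configMapKeyRef`, `secretKeyRef`, `fieldRef`, `resourceFieldRef`, `configMapRef`). A small sketch of how these sources combine; the Secret and ConfigMap names are hypothetical:

``` yaml
env:
  - name: POD_IP
    valueFrom:
      fieldRef:
        fieldPath: status.podIP          # downward API field
  - name: API_TOKEN
    valueFrom:
      secretKeyRef:
        name: sidecar-secrets            # hypothetical Secret
        key: api-token
envFrom:
  - prefix: SIDECAR_                     # optional C_IDENTIFIER prefix for each key
    configMapRef:
      name: sidecar-config               # hypothetical ConfigMap
```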
To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" action. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: LifecycleHandler defines a specific action + that should be taken in a lifecycle hook. One and + only one of the fields, except TCPSocket must be + specified. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" action. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probe describes a health check to be performed + against a container to determine whether it is alive + or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory + for the command is root ('/') in the container's + filesystem. The command is simply exec'd, it + is not run inside a shell, so traditional shell + instructions ('|', etc) won't work. To use a + shell, you need to explicitly call out to that + shell. Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action based + on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
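The `lifecycle` handlers described above (`postStart`/`preStop`) each take exactly one action (`exec`, `httpGet`, `sleep`, or `tcpSocket`). An illustrative fragment with arbitrary command and duration; note that the `sleep` action only works on clusters recent enough to support it, and is shown here only because the schema above includes it:

``` yaml
lifecycle:
  postStart:
    exec:
      command: ["/bin/sh", "-c", "echo started > /tmp/started"]
  preStop:
    sleep:
      seconds: 5
```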
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. + Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action based + on opening a socket + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the + value provided by the pod spec. Value must be non-negative + integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod + feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe + times out. Defaults to 1 second. Minimum value is + 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. 
+ Not specifying a port here DOES NOT prevent that port + from being exposed. Any port which is listening on the + default "0.0.0.0" address inside a container will be + accessible from the network. Modifying this array with + strategic merge patch may corrupt the data. For more + information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's + IP address. This must be a valid port number, + 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: Number of port to expose on the host. + If specified, this must be a valid port number, + 0 < x < 65536. If HostNetwork is specified, this + must match ContainerPort. Most containers do not + need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in + a pod must have a unique name. Name for the port + that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, TCP, + or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: Probe describes a health check to be performed + against a container to determine whether it is alive + or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory + for the command is root ('/') in the container's + filesystem. The command is simply exec'd, it + is not run inside a shell, so traditional shell + instructions ('|', etc) won't work. To use a + shell, you need to explicitly call out to that + shell. Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action based + on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. + Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action based + on opening a socket + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the + value provided by the pod spec. Value must be non-negative + integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod + feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe + times out. Defaults to 1 second. Minimum value is + 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this + resource resize policy applies. Supported values: + cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it defaults + to NotRequired. 
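The `ports` and probe schemas above compose as in the following sketch. The port name and probe endpoint are hypothetical; since probe ports accept either an integer or a string (`int-or-string`), the named port can be referenced directly:

``` yaml
ports:
  - name: http-metrics          # IANA_SVC_NAME, unique within the pod
    containerPort: 9187
    protocol: TCP
readinessProbe:
  httpGet:
    path: /ready
    port: http-metrics
    scheme: HTTP
  periodSeconds: 10
  failureThreshold: 3
  timeoutSeconds: 1
```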
+ type: string + required: + - resourceName + - restartPolicy + type: object + type: array + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: 'Claims lists the names of resources, + defined in spec.resourceClaims, that are used by + this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for + containers.' + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the Pod + where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String() and\ + \ AsInt64() accessors.\n\nThe serialization format\ + \ is:\n\n``` ::= \n\ + \n\t(Note that may be empty, from the\ + \ \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::=\ + \ | \ + \ ::= | . | .\ + \ | . ::= \"+\" | \"\ + -\" ::= | \ + \ ::= | \ + \ | ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International System\ + \ of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G |\ + \ T | P | E\n\n\t(Note that 1024 = 1Ki but 1000\ + \ = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" |\ + \ \"E\" ```\n\nNo matter which\ + \ of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in\ + \ magnitude, nor may it have more than 3 decimal\ + \ places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in the future\ + \ if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had, and\ + \ will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in\ + \ \"canonical form\". This means that Exponent/suffix\ + \ will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\ + \n- No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix) is\ + \ as large as possible.\n\nThe sign will be omitted\ + \ unless the number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\" - 1.5Gi\ + \ will be serialized as \"1536Mi\"\n\nNote that\ + \ the quantity will NEVER be internally represented\ + \ by a floating point number. That is the whole\ + \ point of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical form.\ + \ (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult\ + \ to use these numbers without writing some sort\ + \ of special handling code in the hopes that that\ + \ will cause implementors to also use a fixed\ + \ point implementation." + type: string + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. 
It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String() and\ + \ AsInt64() accessors.\n\nThe serialization format\ + \ is:\n\n``` ::= \n\ + \n\t(Note that may be empty, from the\ + \ \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 ::=\ + \ | \ + \ ::= | . | .\ + \ | . ::= \"+\" | \"\ + -\" ::= | \ + \ ::= | \ + \ | ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International System\ + \ of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G |\ + \ T | P | E\n\n\t(Note that 1024 = 1Ki but 1000\ + \ = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" |\ + \ \"E\" ```\n\nNo matter which\ + \ of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1 in\ + \ magnitude, nor may it have more than 3 decimal\ + \ places. Numbers larger or more precise will\ + \ be capped or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in the future\ + \ if we require larger or smaller quantities.\n\ + \nWhen a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had, and\ + \ will use the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be put in\ + \ \"canonical form\". This means that Exponent/suffix\ + \ will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\ + \n- No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix) is\ + \ as large as possible.\n\nThe sign will be omitted\ + \ unless the number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\" - 1.5Gi\ + \ will be serialized as \"1536Mi\"\n\nNote that\ + \ the quantity will NEVER be internally represented\ + \ by a floating point number. That is the whole\ + \ point of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical form.\ + \ (So always use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult\ + \ to use these numbers without writing some sort\ + \ of special handling code in the hopes that that\ + \ will cause implementors to also use a fixed\ + \ point implementation." + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. Requests cannot exceed Limits. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may only + be set for init containers, and the only allowed value + is "Always". For non-init containers or when this field + is not specified, the restart behavior is defined by + the Pod''s restart policy and the container type. Setting + the RestartPolicy as "Always" for the init container + will have the following effect: this init container + will be continually restarted on exit until all regular + containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy + "Always" will be shut down. This lifecycle differs from + normal init containers and is often referred to as a + "sidecar" container. 
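The `Quantity` format documented at length above reduces, in practice, to plain suffix-based values. A typical `resources` fragment with arbitrary values, using the canonical forms mentioned in the description (milli suffix, and 1.5Gi serialized as 1536Mi):

``` yaml
resources:
  requests:
    cpu: 250m          # 0.25 CPU in canonical milli form
    memory: 256Mi
  limits:
    cpu: "1"           # whole CPUs may be written without a suffix
    memory: 1536Mi     # 1.5Gi in canonical form, per the Quantity description above
```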
Although this init container still + starts in the init container sequence, it does not wait + for the container to complete before proceeding to the + next init container. Instead, the next init container + starts immediately after this init container is started, + or after any startupProbe has successfully completed.' + type: string + securityContext: + description: SecurityContext holds security configuration + that will be applied to a container. Some fields are + present in both SecurityContext and PodSecurityContext. When + both are set, the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent + process. This bool directly controls if the no_new_privs + flag will be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as + Privileged 2) has CAP_SYS_ADMIN Note that this field + cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: Adds and removes POSIX capabilities from + running containers. + properties: + add: + description: Added capabilities + items: + type: string + type: array + drop: + description: Removed capabilities + items: + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. Note that + this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount + to use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field + cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that this + field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. If set in + both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will validate + the image at runtime to ensure that it does not + run as UID 0 (root) and fail to start the container + if it does. If unset or false, no such validation + will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified in + image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be + set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: SELinuxOptions are the labels to be applied + to the container + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: SeccompProfile defines a pod/container's + seccomp profile settings. Only one profile source + may be set. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: 'type indicates which kind of seccomp + profile will be applied. Valid options are: + + + Localhost - a profile defined in a file on the + node should be used. RuntimeDefault - the container + runtime default profile should be used. Unconfined + - no profile should be applied.' + type: string + required: + - type + type: object + windowsOptions: + description: WindowsSecurityContextOptions contain + Windows-specific options and credentials. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA + admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + All of a Pod's containers must have the same + effective HostProcess value (it is not allowed + to have a mix of HostProcess containers and + non-HostProcess containers). In addition, if + HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the + entrypoint of the container process. Defaults + to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: string + type: object + type: object + startupProbe: + description: Probe describes a health check to be performed + against a container to determine whether it is alive + or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory + for the command is root ('/') in the container's + filesystem. The command is simply exec'd, it + is not run inside a shell, so traditional shell + instructions ('|', etc) won't work. To use a + shell, you need to explicitly call out to that + shell. Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. 
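The container `securityContext` fields described above can be combined into a restrictive baseline such as the following sketch; the UID is arbitrary and whether these settings are appropriate depends on the image being run:

``` yaml
securityContext:
  runAsNonRoot: true
  runAsUser: 999                   # arbitrary non-root UID
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop:
      - ALL
  seccompProfile:
    type: RuntimeDefault
```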
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action based + on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This + will be canonicalized upon output, so + case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. + Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action based + on opening a socket + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can hold + an int32 or a string. When used in JSON or + YAML marshalling and unmarshalling, it produces + or consumes the inner type. This allows you + to have, for example, a JSON field that can + accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. 
Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the + value provided by the pod spec. Value must be non-negative + integer. The value zero indicates stop immediately + via the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod + feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe + times out. Defaults to 1 second. Minimum value is + 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a + buffer for stdin in the container runtime. If this is + not set, reads from stdin in the container will always + result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close + the stdin channel after it has been opened by a single + attach. When stdin is true the stdin stream will remain + open across multiple attach sessions. If stdinOnce is + set to true, stdin is opened on container start, is + empty until the first client attaches to stdin, and + then remains open and accepts data until the client + disconnects, at which time stdin is closed and remains + closed until the container is restarted. If this flag + is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which + the container''s termination message will be written + is mounted into the container''s filesystem. Message + written is intended to be brief final status, such as + an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length + across all containers will be limited to 12kb. Defaults + to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should + be populated. File will use the contents of terminationMessagePath + to populate the container status message on both success + and failure. FallbackToLogsOnError will use the last + chunk of container log output if the termination message + file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, + whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a + TTY for itself, also requires 'stdin' to be true. Default + is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - name + - devicePath + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. 
+ items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the + container's environment. Defaults to "" (volume's + root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which + might be configured in the container image. Cannot be + updated. + type: string + customVolumeMounts: + type: object + description: Custom Pod volumes to mount into the specified + container's filesystem. + additionalProperties: + type: array + description: Custom Pod volumes to mount into the specified + container's filesystem. + items: + description: 'VolumeMount describes a mounting of a Volume + within a container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core' + properties: + mountPath: + description: Path within the container at which the + volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and the + other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's + root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves + similarly to SubPath but environment variable references + $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + customInitVolumeMounts: + type: object + description: Custom Pod volumes to mount into the specified + init container's filesystem. + additionalProperties: + type: array + description: Custom Pod volumes to mount into the specified + init container's filesystem. + items: + description: 'VolumeMount describes a mounting of a Volume + within a container. 
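`customVolumeMounts` above is a map whose values are lists of standard VolumeMounts, keyed by the target container. A sketch under the assumption that the key is the container name (whether the plain or the `c-`-prefixed form is expected is not stated in this excerpt); the volume and container names are hypothetical:

``` yaml
customVolumeMounts:
  metrics-sidecar:                  # key: target container (assumption)
    - name: sidecar-config          # must match the name of a Volume
      mountPath: /etc/sidecar
      readOnly: true
    - name: scratch
      mountPath: /var/tmp/sidecar
      subPath: sidecar
```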
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core' + properties: + mountPath: + description: Path within the container at which the + volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and the + other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's + root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves + similarly to SubPath but environment variable references + $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + configurations: + type: object + description: "Cluster custom configurations.\n\n**Example:**\n\n\ + ``` yaml\napiVersion: stackgres.io/v1\nkind: SGCluster\nmetadata:\n\ + \ name: stackgres\nspec:\n configurations:\n sgPostgresConfig:\ + \ 'postgresconf'\n sgPoolingConfig: 'pgbouncerconf'\n backups:\n\ + \ - sgObjectStorage: 'backupconf'\n```\n" + properties: + sgPostgresConfig: + type: string + description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the cluster. + + + It must exist. When not set, a default Postgres config, for + the major version selected, is used. + + + **Changing this field may require a restart.** + + ' + sgPoolingConfig: + type: string + description: 'Name of the [SGPoolingConfig](https://stackgres.io/doc/latest/reference/crd/sgpoolconfig) + used for this cluster. + + + Each pod contains a sidecar with a connection pooler (currently: + [PgBouncer](https://www.pgbouncer.org/)). The connection pooler + is implemented as a sidecar. + + + If not set, a default configuration will be used. Disabling + connection pooling altogether is possible if the disableConnectionPooling + property of the pods object is set to true. + + + **Changing this field may require a restart.** + + ' + observability: + type: object + description: Allow to specify Observability configuration (related + to logs, metrics and traces) + properties: + disableMetrics: + type: boolean + description: 'If set to `true`, avoids creating the Prometheus + exporter sidecar. Recommended when there''s no intention + to use internal monitoring. + + + **Changing this field may require a restart.** + + ' + default: false + receiver: + type: string + description: Indicate the receiver name (for type prometheus) + in the configuration for the collector scraper (if not + specified the default empty name will be used). + prometheusAutobind: + type: boolean + description: If set to `true`, a PodMonitor is created for + each Prometheus instance as specified in the SGConfig.spec.collector.prometheusOperator.monitors + section. 
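The `observability` block described above toggles the metrics sidecar and its wiring to the collector. A minimal sketch; the receiver name is a hypothetical value:

``` yaml
observability:
  disableMetrics: false
  prometheusAutobind: true
  receiver: prometheus              # hypothetical receiver name for the collector scraper
```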
+ default: false + backups: + type: array + description: 'List of backups configurations for this SGCluster + + ' + items: + type: object + description: 'Backup configuration for this SGCluster + + ' + required: + - sgObjectStorage + properties: + compression: + type: string + description: 'Specifies the backup compression algorithm. + Possible options are: lz4, lzma, brotli. The default + method is `lz4`. LZ4 is the fastest method, but compression + ratio is the worst. LZMA is way slower, but it compresses + backups about 6 times better than LZ4. Brotli is a good + trade-off between speed and compression ratio, being + about 3 times better than LZ4. + + ' + enum: + - lz4 + - lzma + - brotli + cronSchedule: + type: string + description: 'Continuous Archiving backups are composed + of periodic *base backups* and all the WAL segments + produced in between those base backups. This parameter + specifies at what time and with what frequency to start + performing a new base backup. + + + Use cron syntax (`m h dom mon dow`) for this parameter, + i.e., 5 values separated by spaces: + + * `m`: minute, 0 to 59. + + * `h`: hour, 0 to 23. + + * `dom`: day of month, 1 to 31 (recommended not to + set it higher than 28). + + * `mon`: month, 1 to 12. + + * `dow`: day of week, 0 to 7 (0 and 7 both represent + Sunday). + + + Also ranges of values (`start-end`), the symbol `*` + (meaning `first-last`) or even `*/N`, where `N` is a + number, meaning ""every `N`, may be used. All times + are UTC. It is recommended to avoid 00:00 as base backup + time, to avoid overlapping with any other external operations + happening at this time. + + + If not set, full backups are never performed automatically. + + ' + performance: + type: object + description: 'Configuration that affects the backup network + and disk usage performance. + + ' + properties: + maxNetworkBandwidth: + type: integer + description: 'Maximum storage upload bandwidth used + when storing a backup. In bytes (per second). + + ' + maxDiskBandwidth: + type: integer + description: 'Maximum disk read I/O when performing + a backup. In bytes (per second). + + ' + uploadDiskConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to store the data. This parameter configures + the number of parallel streams to use to reading + from disk. By default, it''s set to 1. + + ' + uploadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to store the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to 16. + + ' + downloadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to read the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to the minimum between the number of file + to read and 10. + + ' + retention: + type: integer + minimum: 1 + description: 'When an automatic retention policy is defined + to delete old base backups, this parameter specifies + the number of base backups to keep, in a sliding window. + + + Consequently, the time range covered by backups is `periodicity*retention`, + where `periodicity` is the separation between backups + as specified by the `cronSchedule` property. + + + Default is 5. + + ' + default: 5 + sgObjectStorage: + type: string + description: 'Name of the [SGObjectStorage](https://stackgres.io/doc/latest/reference/crd/sgobjectstorage) + to use for the cluster. 
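The backup fields above combine with the `configurations.backups` list shown in the embedded SGCluster example. A sketch reusing the `backupconf` SGObjectStorage name from that example; schedule, bandwidth, and concurrency values are arbitrary:

``` yaml
spec:
  configurations:
    backups:
      - sgObjectStorage: backupconf        # existing SGObjectStorage, per the embedded example
        cronSchedule: '30 2 * * *'         # daily base backup at 02:30 UTC (avoiding 00:00, as recommended)
        compression: lz4
        retention: 5
        performance:
          uploadConcurrency: 16
          maxNetworkBandwidth: 104857600   # 100 MiB/s, expressed in bytes per second
```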
+ + + It defines the location in which the the backups will + be stored. + + ' + path: + type: string + description: "The path were the backup is stored. If not\ + \ set this field is filled up by the operator.\n\nWhen\ + \ provided will indicate were the backups and WAL files\ + \ will be stored.\n\n> **WARNING**: Most users should\ + \ leave this field empty since having it manually set\ + \ could be dangerous. If the value is repeated due to\ + \ re-creating an SGCluster or\n re-using the same value\ + \ in another SGCluster and you may get a mixed WAL history\ + \ with unrecoverable backups.\n" + useVolumeSnapshot: + type: boolean + description: 'If specified SGBackup will use VolumeSnapshot + to create backups. + + + This functionality still require to store WAL files + in an SGObjectStorage but could result in much faster + backups and restore of those backups. + + + See also https://kubernetes.io/docs/concepts/storage/volume-snapshots/ + + ' + volumeSnapshotClass: + type: string + description: 'The name of the VolumeSnaphostClass to use + to create the VolumeSnapshot for backups. + + + See also https://kubernetes.io/docs/concepts/storage/volume-snapshots/ + + ' + fastVolumeSnapshot: + type: boolean + description: 'If specified SGBackup will create a backup + forcing a fast start (by setting parameter `fast` to + `true` when calling `pg_backup_start`) that will reduce + the time the backups may take at the expense of more + IO usage. + + + See also https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-LOWLEVEL-BASE-BACKUP + + ' + timeout: + type: integer + description: 'Allow to set a timeout for the backup creation. + + + If not set it will be disabled and the backup operation + will continue until the backup completes or fail. If + set to 0 is the same as not being set. + + + Make sure to set a reasonable high value in order to + allow for any unexpected delays during backup creation + (network low bandwidth, disk low throughput and so forth). + + ' + reconciliationTimeout: + type: integer + default: 300 + description: "Allow to set a timeout for the reconciliation\ + \ process that take place after the backup.\n\nIf not\ + \ set defaults to 300 (5 minutes). If set to 0 it will\ + \ disable timeout.\n\nFailure of reconciliation will\ + \ not make the backup fail and will be re-tried the\ + \ next time a SGBackup\n or shecduled backup Job take\ + \ place.\n" + maxRetries: + type: integer + description: 'The maximum number of retries the backup + operation is allowed to do after a failure. + + + A value of `0` (zero) means no retries are made. Defaults + to: `3`. + + ' + retainWalsForUnmanagedLifecycle: + type: boolean + description: 'If specified, WAL created after any unmanaged + lifecycle backups will be retained. + + ' + patroni: + type: object + description: Allow to specify Patroni configuration that will + extend the generated one + properties: + dynamicConfig: + type: object + description: 'Allow to specify Patroni dynamic configuration + that will overwrite the generated one. See https://patroni.readthedocs.io/en/latest/dynamic_configuration.html + + + The following configuration fields will be ignored: + + + * synchronous_mode + + * synchronous_mode_strict + + * failsafe_mode + + * postgresql + + * standby_cluster + + ' + x-kubernetes-preserve-unknown-fields: true + initialConfig: + type: object + description: 'Allow to specify Patroni configuration that + will overwrite the generated one. 
See https://patroni.readthedocs.io/en/latest/yaml_configuration.html + + + The following configuration fields will be ignored: + + + * name + + * namespace + + * log + + * bootstrap + + * citus + + * postgresql # with the exception of postgresql.callbacks, + postgresql.pre_promote, postgresql.before_stop and postgresql.pg_ctl_timeout + + * restapi + + * ctl + + * watchdog + + * tags + + + **This field can only be set on creation.** + + ' + x-kubernetes-preserve-unknown-fields: true + credentials: + type: object + description: 'Allow to specify custom credentials for Postgres + users and Patroni REST API + + ' + properties: + patroni: + type: object + description: 'Kubernetes [SecretKeySelectors](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials for patroni REST API. + + + **Changing this field may require a restart.** + + ' + properties: + restApiPassword: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password for the patroni REST API. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + users: + type: object + description: "Kubernetes [SecretKeySelectors](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)\ + \ that contains the credentials of the users.\n\n**Changing\ + \ this field may require a manual modification of the\ + \ database users to reflect the new values specified.**\n\ + \nIn particular you may have to create those users if\ + \ username is changed or alter password if it is changed.\ + \ Here are the SQL commands to perform such operation\ + \ (replace\n default usernames with the new ones and\ + \ `***` with their respective passwords):\n\n* Superuser\ + \ username changed:\n```\nCREATE ROLE postgres;\n```\n\ + * Superuser password changed:\n```\nALTER ROLE postgres\ + \ WITH SUPERUSER INHERIT CREATEROLE CREATEDB LOGIN REPLICATION\ + \ BYPASSRLS PASSWORD '***';\n```\n* Replication username\ + \ changed:\n```\nCREATE ROLE replicator;\n```\n* Replication\ + \ password changed:\n```\nALTER ROLE replicator WITH NOSUPERUSER\ + \ INHERIT NOCREATEROLE NOCREATEDB LOGIN REPLICATION NOBYPASSRLS\ + \ PASSWORD '***';\n```\n* Authenticator username changed:\n\ + ```\nCREATE ROLE authenticator;\n```\n* Authenticator\ + \ password changed:\n```\nALTER ROLE authenticator WITH\ + \ SUPERUSER INHERIT NOCREATEROLE NOCREATEDB LOGIN NOREPLICATION\ + \ NOBYPASSRLS PASSWORD '***';\n```\n\n**Changing this\ + \ field may require a restart.**\n" + properties: + superuser: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the superuser (usually + the postgres user). + + ' + properties: + username: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. 
[More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + replication: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the replication user + used to replicate from the primary cluster and from + replicas of this cluster. + + ' + properties: + username: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + authenticator: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the authenticator + user used by pgbouncer to authenticate other users. + + ' + properties: + username: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. 
+ binding: + type: object + description: "This section allows to specify the properties\ + \ of [Service Binding spec for provisioned service](https://servicebinding.io/spec/core/1.0.0/#provisioned-service).\n\ + \ If not specified, then some default will be used.\n\nFor\ + \ more information see https://servicebinding.io/spec/core/1.0.0/\n" + properties: + provider: + type: string + description: It's the reference of custom provider name. + If not specified, then the default value will be `stackgres` + database: + type: string + description: Allow to specify the database name. If not + specified, then the default value is `postgres` + username: + type: string + description: Allow to specify the username. If not specified, + then the superuser username will be used. + password: + type: object + description: Allow to reference Secret that contains the + user's password. If not specified, then the superuser + password will be used. + properties: + name: + type: string + description: The name of the Secret + key: + type: string + description: The key of the Secret + managedSql: + type: object + description: 'This section allows to reference SQL scripts that + will be applied to the cluster live. + + ' + properties: + continueOnSGScriptError: + type: boolean + description: If true, when any entry of any `SGScript` fail + will not prevent subsequent `SGScript` from being executed. + By default is `false`. + default: false + scripts: + type: array + description: 'A list of script references that will be executed + in sequence. + + ' + items: + type: object + description: "A script reference. Each version of each entry\ + \ of the script referenced will be executed exactly once\ + \ following the sequence defined\n in the referenced script\ + \ and skipping any script entry that have already been executed.\n" + properties: + id: + type: integer + description: 'The id is immutable and must be unique across + all the `SGScript` entries. It is replaced by the operator + and is used to identify the `SGScript` entry. + + ' + sgScript: + type: string + description: A reference to an `SGScript` + initialData: + type: object + description: 'Cluster initialization data options. Cluster may be + initialized empty, or from a backup restoration. + + + **This field can only be set on creation.** + + ' + properties: + restore: + type: object + description: 'This section allows to restore a cluster from + an existing copy of the metadata and data. + + ' + properties: + fromBackup: + type: object + description: "From which backup to restore and how the process\ + \ is configured\n\n**Example:**\n\n```yaml\napiVersion:\ + \ stackgres.io/v1\nkind: SGCluster\nmetadata:\n name:\ + \ stackgres\nspec:\n initialData:\n restore:\n \ + \ fromBackup:\n name: stackgres-backup\n \ + \ downloadDiskConcurrency: 1\n```\n" + properties: + uid: + type: string + description: "When set to the UID of an existing [SGBackup](https://stackgres.io/doc/latest/reference/crd/sgbackup),\ + \ the cluster is initialized by restoring the\n backup\ + \ data to it. If not set, the cluster is initialized\ + \ empty. This field is deprecated.\n" + name: + type: string + description: "When set to the name of an existing [SGBackup](https://stackgres.io/doc/latest/reference/crd/sgbackup),\ + \ the cluster is initialized by restoring the\n backup\ + \ data to it. If not set, the cluster is initialized\ + \ empty. 
The selected backup must be in the same namespace.\n" + target: + type: string + description: "Specify the [recovery_target](https://postgresqlco.nf/doc/en/param/recovery_target/)\ + \ that specifies that recovery should end as soon\ + \ as a consistent\n state is reached, i.e., as early\ + \ as possible. When restoring from an online backup,\ + \ this means the point where taking the backup ended.\n\ + \n Technically, this is a string parameter, but 'immediate'\ + \ is currently the only allowed value.\n" + targetTimeline: + type: string + description: "Specify the [recovery_target_timeline](https://postgresqlco.nf/doc/en/param/recovery_target_timeline/)\ + \ to recover into a particular timeline.\n The default\ + \ is to recover along the same timeline that was current\ + \ when the base backup was taken. Setting this to\ + \ latest recovers to the latest\n timeline found\ + \ in the archive, which is useful in a standby server.\ + \ Other than that you only need to set this parameter\ + \ in complex re-recovery\n situations, where you\ + \ need to return to a state that itself was reached\ + \ after a point-in-time recovery.\n" + targetInclusive: + type: boolean + description: "Specify the [recovery_target_inclusive](https://postgresqlco.nf/doc/en/param/recovery_target_timeline/)\ + \ to stop recovery just after the specified\n recovery\ + \ target (true), or just before the recovery target\ + \ (false). Applies when targetLsn, pointInTimeRecovery,\ + \ or targetXid is specified. This\n setting controls\ + \ whether transactions having exactly the target WAL\ + \ location (LSN), commit time, or transaction ID,\ + \ respectively, will be included\n in the recovery.\ + \ Default is true.\n" + targetName: + type: string + description: "[recovery_target_name](https://postgresqlco.nf/doc/en/param/recovery_target_name/)\ + \ specifies the named restore point\n (created with\ + \ pg_create_restore_point()) to which recovery will\ + \ proceed.\n" + targetXid: + type: string + description: "[recovery_target_xid](https://postgresqlco.nf/doc/en/param/recovery_target_xid/)\ + \ specifies the transaction ID up to which recovery\ + \ will proceed.\n Keep in mind that while transaction\ + \ IDs are assigned sequentially at transaction start,\ + \ transactions can complete in a different numeric\ + \ order.\n The transactions that will be recovered\ + \ are those that committed before (and optionally\ + \ including) the specified one. The precise stopping\ + \ point\n is also influenced by targetInclusive.\n" + targetLsn: + type: string + description: "[recovery_target_lsn](https://postgresqlco.nf/doc/en/param/recovery_target_lsn/)\ + \ specifies the LSN of the write-ahead log location\ + \ up to which\n recovery will proceed. 
The precise\ + \ stopping point is also influenced by targetInclusive.\ + \ This parameter is parsed using the system data type\n\ + \ pg_lsn.\n" + pointInTimeRecovery: + type: object + description: "Using Point-in-Time Recovery (PITR) it\ + \ is possible to restore the database to its state\ + \ at any moment in the past by setting `restoreToTimestamp`\n\ + \ to a value between the timestamps at which your\ + \ chosen SGBackup and the subsequent one were taken.\ + \ If the chosen SGBackup is the latest one, the\n\ + \ `restoreToTimestamp` value can be between the timestamps\ + \ at which that last SGBackup was taken and the current\ + \ one.\n\nSee also: https://www.postgresql.org/docs/current/continuous-archiving.html\n" + properties: + restoreToTimestamp: + type: string + description: 'An ISO 8601 date, that holds UTC date + indicating at which point-in-time the database + have to be restored. + + ' + downloadDiskConcurrency: + type: integer + minimum: 1 + description: 'The backup fetch process may fetch several + streams in parallel. Parallel fetching is enabled when + set to a value larger than one. + + + If not specified it will be interpreted as latest. + + ' + scripts: + type: array + description: '**Deprecated** use instead .spec.managedSql with + SGScript. + + + A list of SQL scripts executed in sequence, exactly once, + when the database is bootstrap and/or after restore is completed. + + ' + items: + type: object + description: '**Deprecated** use instead .spec.managedSql + with SGScript. + + + Scripts are executed in auto-commit mode with the user `postgres` + in the specified database (or in database `postgres` if + not specified). + + + Fields `script` and `scriptFrom` are mutually exclusive + and only one of them is required. + + ' + properties: + name: + type: string + description: 'Name of the script. Must be unique across + this SGCluster. + + ' + database: + type: string + description: 'Database where the script is executed. Defaults + to the `postgres` database, if not specified. + + ' + script: + type: string + description: 'Raw SQL script to execute. This field is + mutually exclusive with `scriptFrom` field. + + ' + scriptFrom: + type: object + description: 'Reference to either a Kubernetes [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) + or a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + that contains the SQL script to execute. This field + is mutually exclusive with `script` field. + + + Fields `secretKeyRef` and `configMapKeyRef` are mutually + exclusive, and one of them is required. + + ' + properties: + secretKeyRef: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the SQL script to execute. This field + is mutually exclusive with `configMapKeyRef` field. + + ' + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + configMapKeyRef: + type: object + description: 'A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + reference that contains the SQL script to execute. + This field is mutually exclusive with `secretKeyRef` + field. 
+ + ' + properties: + name: + type: string + description: 'The name of the ConfigMap that contains + the SQL script to execute. + + ' + key: + type: string + description: 'The key name within the ConfigMap + that contains the SQL script to execute. + + ' + replicateFrom: + type: object + description: "Make the cluster a read-only standby replica allowing\ + \ to replicate from another PostgreSQL instance and acting as\ + \ a rely.\n\nChanging this section is allowed to fix issues or\ + \ to change the replication source.\n\nRemoving this section convert\ + \ the cluster in a normal cluster where the standby leader is\ + \ converted into the a primary instance.\n\n**Example:**\n\nFrom\ + \ SGCluster instance:\n\n```yaml\napiVersion: stackgres.io/v1\n\ + kind: SGCluster\nmetadata:\n name: stackgres\nspec:\n replicateFrom:\n\ + \ instance:\n sgCluster: my-cluster\n```\n> **Note:**\ + \ The above example allow to replicate from another SGCluster\ + \ instance that in the same namespace and the same K8s cluster.\n\ + > \n> This option cannot be combined with external instance, storage\ + \ and users.\n\nFrom external instance:\n\n```yaml\napiVersion:\ + \ stackgres.io/v1\nkind: SGCluster\nmetadata:\n name: stackgres\n\ + spec:\n replicateFrom:\n instance:\n external:\n \ + \ host: ${HOST_IP}\n port: 5433\n users:\n superuser:\n\ + \ username:\n name: pg-origin-secret\n \ + \ key: superuser-username\n password:\n name:\ + \ pg-origin-secret\n key: superuser-password\n replication:\n\ + \ username:\n name: pg-origin-secret\n \ + \ key: replication-username\n password:\n name:\ + \ pg-origin-secret\n key: replication-password\n \ + \ authenticator:\n username:\n name: pg-origin-secret\n\ + \ key: authenticator-username\n password:\n \ + \ name: pg-origin-secret\n key: authenticator-password\n\ + ```\n\n> **Note:** Replace the ${HOST_IP} with the actual IP of\ + \ the external instance.\n\nFrom Storage:\n\n```yaml\napiVersion:\ + \ stackgres.io/v1\nkind: SGCluster\nmetadata:\n name: stackgres\n\ + spec:\n initialData:\n restore:\n fromBackup:\n \ + \ name: backup-name\n replicateFrom:\n storage:\n path:\ + \ ${PG_ORIGIN_BACKUP_PATH}\n sgObjectStorage: stackgres-backups\n\ + \ users:\n superuser:\n username:\n name:\ + \ pg-origin-secret\n key: superuser-username\n \ + \ password:\n name: pg-origin-secret\n key:\ + \ superuser-password\n replication:\n username:\n\ + \ name: pg-origin-secret\n key: replication-username\n\ + \ password:\n name: pg-origin-secret\n \ + \ key: replication-password\n authenticator:\n username:\n\ + \ name: pg-origin-secret\n key: authenticator-username\n\ + \ password:\n name: pg-origin-secret\n \ + \ key: authenticator-password\n```\n\n> **Note:** Using storage\ + \ only to replicate from requires to recover from a backup in\ + \ order to bootstrap the database.\n> \n> Replace the ${PG_ORIGIN_BACKUP_PATH}\ + \ with the actual path in the object storage where the backups\ + \ are stored.\n\nFrom external instance and storage:\n\n```yaml\n\ + apiVersion: stackgres.io/v1\nkind: SGCluster\nmetadata:\n name:\ + \ stackgres\nspec:\n replicateFrom:\n instance:\n external:\n\ + \ host: ${HOST_IP}\n port: 5433\n storage:\n\ + \ path: ${PG_ORIGIN_BACKUP_PATH}\n sgObjectStorage:\ + \ stackgres-backups\n users:\n superuser:\n username:\n\ + \ name: pg-origin-secret\n key: superuser-username\n\ + \ password:\n name: pg-origin-secret\n \ + \ key: superuser-password\n replication:\n username:\n\ + \ name: pg-origin-secret\n key: replication-username\n\ + \ password:\n name: 
pg-origin-secret\n \ + \ key: replication-password\n authenticator:\n username:\n\ + \ name: pg-origin-secret\n key: authenticator-username\n\ + \ password:\n name: pg-origin-secret\n \ + \ key: authenticator-password\n```\n\n> **Note**: Replace the\ + \ ${HOST_IP} with the actual IP of the external instance.\n> \n\ + > Replace the ${PG_ORIGIN_BACKUP_PATH} with the actual path in\ + \ the object storage where the backups are stored.\n" + properties: + instance: + type: object + description: 'Configure replication from a PostgreSQL instance. + + ' + properties: + sgCluster: + type: string + description: 'Configure replication from an SGCluster. + + ' + external: + type: object + description: 'Configure replication from an external PostgreSQL + instance. + + ' + required: + - host + - port + properties: + host: + type: string + description: The host of the PostgreSQL to replicate + from. + port: + type: integer + description: The port of the PostgreSQL to replicate + from. + storage: + type: object + description: "Configure replication from an SGObjectStorage\ + \ using WAL shipping.\n\nThe file structure of the object\ + \ storage must follow the\n [WAL-G](https://github.com/wal-g/wal-g)\ + \ file structure.\n" + required: + - sgObjectStorage + - path + properties: + performance: + type: object + description: 'Configuration that affects the backup network + and disk usage performance during recovery. + + ' + properties: + maxNetworkBandwidth: + type: integer + description: 'Maximum storage upload bandwidth used + when storing a backup. In bytes (per second). + + ' + maxDiskBandwidth: + type: integer + description: 'Maximum disk read I/O when performing + a backup. In bytes (per second). + + ' + downloadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to read the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to the minimum between the number of file + to read and 10. + + ' + sgObjectStorage: + type: string + description: The SGObjectStorage name to replicate from. + path: + type: string + description: The path in the SGObjectStorage to replicate + from. + users: + type: object + description: 'Kubernetes [SecretKeySelectors](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the users. + + ' + required: + - superuser + - replication + - authenticator + properties: + superuser: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the superuser (usually + the postgres user). + + ' + required: + - username + - password + properties: + username: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. 
+ + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + replication: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the replication user + used to replicate from the primary cluster and from replicas + of this cluster. + + ' + required: + - username + - password + properties: + username: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + authenticator: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the authenticator user + used by pgbouncer to authenticate other users. + + ' + required: + - username + - password + properties: + username: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + prometheusAutobind: + type: boolean + description: '**Deprecated** use instead .spec.configurations.observability.prometheusAutobind. 
+ + ' + nonProductionOptions: + type: object + properties: + disableClusterPodAntiAffinity: + type: boolean + description: 'It is a best practice, on non-containerized environments, + when running production workloads, to run each database server + on a different server (virtual or physical), i.e., not to + co-locate more than one database server per host. + + + The same best practice applies to databases on containers. + By default, StackGres will not allow to run more than one + StackGres pod on a given Kubernetes node. Set this property + to true to allow more than one StackGres pod per node. + + + This property default value may be changed depending on the + value of field `.spec.profile`. + + + **Changing this field may require a restart.** + + ' + disablePatroniResourceRequirements: + type: boolean + description: 'It is a best practice, on containerized environments, + when running production workloads, to enforce container''s + resources requirements. + + + The same best practice applies to databases on containers. + By default, StackGres will configure resource requirements + for patroni container. Set this property to true to prevent + StackGres from setting patroni container''s resources requirement. + + + This property default value may be changed depending on the + value of field `.spec.profile`. + + + **Changing this field may require a restart.** + + ' + disableClusterResourceRequirements: + type: boolean + description: 'It is a best practice, on containerized environments, + when running production workloads, to enforce container''s + resources requirements. + + + By default, StackGres will configure resource requirements + for all the containers. Set this property to true to prevent + StackGres from setting container''s resources requirements + (except for patroni container, see `disablePatroniResourceRequirements`). + + + This property default value may be changed depending on the + value of field `.spec.profile`. + + + **Changing this field may require a restart.** + + ' + enableSetPatroniCpuRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ cpu requirements request to be equals to the limit allow\ + \ to achieve the highest level of performance. Doing so, reduces\ + \ the chances of leaving\n the workload with less cpu than\ + \ it requires. It also allow to set [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs\ + \ on the node.\n\nBy default, StackGres will configure cpu\ + \ requirements to have the same limit and request for the\ + \ patroni container. Set this property to true to prevent\ + \ StackGres from setting patroni container's cpu requirements\ + \ request equals to the limit\n when `.spec.requests.cpu`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n\ + **Changing this field may require a restart.**\n" + default: false + enableSetClusterCpuRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ cpu requirements request to be equals to the limit allow\ + \ to achieve the highest level of performance. 
Doing so, reduces\ + \ the chances of leaving\n the workload with less cpu than\ + \ it requires. It also allow to set [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs\ + \ on the node.\n\nBy default, StackGres will configure cpu\ + \ requirements to have the same limit and request for all\ + \ the containers. Set this property to true to prevent StackGres\ + \ from setting container's cpu requirements request equals\ + \ to the limit (except for patroni container, see `enablePatroniCpuRequests`)\n\ + \ when `.spec.requests.containers..cpu` `.spec.requests.initContainers..cpu` is configured in the referenced `SGInstanceProfile`.\n\ + \n**Changing this field may require a restart.**\n" + default: false + enableSetPatroniMemoryRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ memory requirements request to be equals to the limit allow\ + \ to achieve the highest level of performance. Doing so, reduces\ + \ the chances of leaving\n the workload with less memory\ + \ than it requires.\n\nBy default, StackGres will configure\ + \ memory requirements to have the same limit and request for\ + \ the patroni container. Set this property to true to prevent\ + \ StackGres from setting patroni container's memory requirements\ + \ request equals to the limit\n when `.spec.requests.memory`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n\ + **Changing this field may require a restart.**\n" + default: false + enableSetClusterMemoryRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ memory requirements request to be equals to the limit allow\ + \ to achieve the highest level of performance. Doing so, reduces\ + \ the chances of leaving\n the workload with less memory\ + \ than it requires.\n\nBy default, StackGres will configure\ + \ memory requirements to have the same limit and request for\ + \ all the containers. Set this property to true to prevent\ + \ StackGres from setting container's memory requirements request\ + \ equals to the limit (except for patroni container, see `enablePatroniCpuRequests`)\n\ + \ when `.spec.requests.containers..memory`\ + \ `.spec.requests.initContainers..memory`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n\ + **Changing this field may require a restart.**\n" + default: false + enabledFeatureGates: + type: array + description: 'A list of StackGres feature gates to enable (not + suitable for a production environment). + + + Available feature gates are: + + * `babelfish-flavor`: Allow to use `babelfish` flavor. + + ' + items: + type: string + description: The name of the fature gate to enable. + distributedLogs: + type: object + description: "StackGres features a functionality for all pods to\ + \ send Postgres, Patroni and PgBouncer logs to a central (distributed)\ + \ location, which is in turn another Postgres database. Logs can\ + \ then be accessed via SQL interface or from the web UI. This\ + \ section controls whether to enable this feature or not. 
If not\ + \ enabled, logs are send to the pod's standard output.\n\n**Example:**\n\ + \n```yaml\napiVersion: stackgres.io/v1\nkind: SGCluster\nmetadata:\n\ + \ name: stackgres\nspec:\n distributedLogs:\n sgDistributedLogs:\ + \ distributedlogs\n```\n" + properties: + sgDistributedLogs: + type: string + description: 'Name of the [SGDistributedLogs](https://stackgres.io/doc/latest/reference/crd/sgdistributedlogs/) + to use for this cluster. It must exist. + + ' + retention: + type: string + pattern: ^[0-9]+ (minutes?|hours?|days?|months?) + description: "Define a retention window with the syntax `\ + \ (minutes|hours|days|months)` in which log entries are kept.\n\ + \ Log entries will be removed when they get older more than\ + \ the double of the specified retention window.\n\nWhen this\ + \ field is changed the retention will be applied only to log\ + \ entries that are newer than the end of\n the retention\ + \ window previously specified. If no retention window was\ + \ previously specified it is considered\n to be of 7 days.\ + \ This means that if previous retention window is of `7 days`\ + \ new retention configuration will\n apply after UTC timestamp\ + \ calculated with: `SELECT date_trunc('days', now() at time\ + \ zone 'UTC') - INTERVAL '7 days'`.\n" + toInstallPostgresExtensions: + type: array + description: 'The list of Postgres extensions to install. + + + **This section is filled by the operator.** + + ' + items: + type: object + required: + - name + - publisher + - version + - repository + - postgresVersion + properties: + name: + type: string + description: The name of the extension to install. + publisher: + type: string + description: The id of the publisher of the extension to install. + version: + type: string + description: The version of the extension to install. + repository: + type: string + description: The repository base URL from where the extension + will be installed from. + postgresVersion: + type: string + description: The postgres major version of the extension to + install. + build: + type: string + description: The build version of the extension to install. + extraMounts: + type: array + description: The extra mounts of the extension to install. + items: + type: string + description: The extra mount of the installed extension. + status: + type: object + description: Current status of a StackGres cluster. + properties: + instances: + type: integer + description: Actual number of instances for the StackGres cluster. + Each instance is a Pod containing one Postgres server. + labelSelector: + type: string + description: Actual label selector for instances for the StackGres + cluster's Pods to be used by autoscaling. + replicationInitializationFailedSGBackup: + type: string + description: Indicates the latest failed backup for the replication + initialization. + conditions: + type: array + items: + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one + status to another. + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, + Unknown. + type: string + type: + description: Type of deployment condition. + type: string + podStatuses: + type: array + description: The list of pod statuses. 
+ items: + type: object + required: + - name + properties: + name: + type: string + description: The name of the pod. + replicationGroup: + type: integer + description: Indicates the replication group this Pod belongs + to. + primary: + type: boolean + description: Indicates if the pod is the elected primary + pendingRestart: + type: boolean + description: Indicates if the pod requires restart + installedPostgresExtensions: + type: array + description: The list of Postgres extensions currently installed. + items: + type: object + required: + - name + - publisher + - version + - repository + - postgresVersion + properties: + name: + type: string + description: The name of the installed extension. + publisher: + type: string + description: The id of the publisher of the installed + extension. + version: + type: string + description: The version of the installed extension. + repository: + type: string + description: The repository base URL from where the + extension was installed from. + postgresVersion: + type: string + description: The postgres major version of the installed + extension. + build: + type: string + description: The build version of the installed extension. + extraMounts: + type: array + description: The extra mounts of the installed extension. + items: + type: string + description: The extra mount of the installed extension. + dbOps: + type: object + description: 'Used by some [SGDbOps](https://stackgres.io/doc/latest/reference/crd/sgdbops) + to indicate the operation configuration and status to the operator. + + ' + properties: + majorVersionUpgrade: + type: object + description: 'The major version upgrade configuration and status + + ' + properties: + initialInstances: + type: array + description: 'The instances that this operation is targetting + + ' + items: + type: string + primaryInstance: + type: string + description: 'The primary instance that this operation is + targetting + + ' + sourcePostgresVersion: + type: string + description: 'The source PostgreSQL version + + ' + sourcePostgresExtensions: + type: array + description: 'The source PostgreSQL extensions + + ' + items: + type: object + properties: + name: + type: string + description: The name of the extension to deploy. + publisher: + type: string + description: The id of the publisher of the extension + to deploy. If not specified `com.ongres` will be + used by default. + default: com.ongres + version: + type: string + description: The version of the extension to deploy. + If not specified version of `stable` channel will + be used by default and if only a version is available + that one will be used. + repository: + type: string + description: 'The repository base URL from where to + obtain the extension to deploy. 
+ + ' + sourceSgPostgresConfig: + type: string + description: 'The source SGPostgresConfig reference + + ' + sourceBackupPath: + type: string + description: 'The source backup path + + ' + targetPostgresVersion: + type: string + description: 'The target PostgreSQL version + + ' + locale: + type: string + description: 'The PostgreSQL locale + + ' + encoding: + type: string + description: 'The PostgreSQL encoding + + ' + dataChecksum: + type: boolean + description: 'Indicates if PostgreSQL data checksum is enabled + + ' + link: + type: boolean + description: 'Use `--link` option when running `pg_upgrade` + + ' + clone: + type: boolean + description: 'Use `--clone` option when running `pg_upgrade` + + ' + check: + type: boolean + description: 'Run `pg_upgrade` with check option instead + of performing the real upgrade + + ' + rollback: + type: boolean + description: 'Indicates to rollback from a previous major + version upgrade + + ' + restart: + type: object + description: 'The minor version upgrade configuration and status + + ' + properties: + initialInstances: + type: array + description: 'The instances that this operation is targetting + + ' + items: + type: string + primaryInstance: + type: string + description: 'The primary instance that this operation is + targetting + + ' + minorVersionUpgrade: + type: object + description: 'The minor version upgrade configuration and status + + ' + properties: + initialInstances: + type: array + description: 'The instances that this operation is targetting + + ' + items: + type: string + primaryInstance: + type: string + description: 'The primary instance that this operation is + targetting + + ' + sourcePostgresVersion: + type: string + description: 'Postgres version that is currently running + on the cluster + + ' + targetPostgresVersion: + type: string + description: 'The desired Postgres version for the cluster + + ' + securityUpgrade: + type: object + description: 'The minor version upgrade configuration and status + + ' + properties: + initialInstances: + type: array + description: 'The instances that this operation is targetting + + ' + items: + type: string + primaryInstance: + type: string + description: 'The primary instance that this operation is + targetting + + ' + arch: + type: string + description: The architecture on which the cluster has been initialized. + os: + type: string + description: The operative system on which the cluster has been + initialized. + labelPrefix: + type: string + description: The custom prefix that is prepended to all labels. + managedSql: + type: object + description: 'This section stores the state of referenced SQL scripts + that are applied to the cluster live. + + ' + properties: + scripts: + type: array + description: A list of statuses for script references. + items: + type: object + description: The status of a script reference. + properties: + id: + type: integer + description: Identify the associated `SGScript` entry + with the same value in the `id` field. + startedAt: + type: string + description: ISO-8601 datetime of when the script execution + has been started. + updatedAt: + type: string + description: ISO-8601 datetime of when the last script + execution occurred. Will be reset each time the referenced + `SGScripts` entry will be applied. + failedAt: + type: string + description: ISO-8601 datetime of when the script execution + had failed (mutually exclusive with `completedAt`). 
+ completedAt: + type: string + description: ISO-8601 datetime of when the script execution + had completed (mutually exclusive with `failedAt`). + scripts: + type: array + description: A list of statuses for script entries of + referenced script. + items: + type: object + description: The status of a script entry of a referenced + script. + properties: + id: + type: integer + description: Identify the associated script entry + with the same value in the `id` field. + version: + type: integer + description: The latest version applied + intents: + type: integer + description: Indicates the number of intents or + failures occurred + failureCode: + type: string + description: If failed, the error code of the failure. + See also https://www.postgresql.org/docs/current/errcodes-appendix.html + failure: + type: string + description: If failed, a message of the failure + binding: + type: object + description: 'This section follow the schema specified in [Service + Binding spec for provisioned service](https://servicebinding.io/spec/core/1.0.0/#provisioned-service). + + + For more information see https://servicebinding.io/spec/core/1.0.0/ + + ' + properties: + name: + type: string + description: The name of the Secret as specified in [Service + Binding spec for provisioned service](https://servicebinding.io/spec/core/1.0.0/#provisioned-service). diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgconfigs.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgconfigs.yaml new file mode 100644 index 00000000000..3528e363ab8 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgconfigs.yaml @@ -0,0 +1,1511 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgconfigs.stackgres.io +spec: + group: stackgres.io + names: + kind: SGConfig + listKind: SGConfigList + plural: sgconfigs + singular: sgconfig + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .metadata.annotations.stackgres\.io/lockPod + name: operator-pod + type: string + - jsonPath: .status.version + name: operator-version + type: string + schema: + openAPIV3Schema: + type: object + description: "SGConfig stores the configuration of the StackGres Operator\n\ + \n> **WARNING**: Creating more than one SGConfig is forbidden.\n The single\ + \ SGConfig should be created automatically during installation.\n More\ + \ SGConfig may exists only when allowedNamespaces or allowedNamespaceLabelSelector\ + \ is used.\n" + properties: + spec: + description: Spec defines the desired state of SGConfig + type: object + properties: + containerRegistry: + type: string + default: quay.io + description: 'The container registry host (and port) where the images + will be pulled from. + + + > This value can only be set in operator helm chart or with the + environment variable `SG_CONTAINER_REGISTRY`. + + ' + imagePullPolicy: + type: string + default: IfNotPresent + description: Image pull policy used for images loaded by the Operator + imagePullSecrets: + type: array + description: 'The list of references to secrets in the same namespace + where a ServiceAccount is created by the operator to use for pulling + any images in pods that reference such ServiceAccount. ImagePullSecrets + are distinct from Secrets because Secrets can be mounted in the + pod, but ImagePullSecrets are only accessed by the kubelet. 
More + info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + + ' + items: + type: object + description: A reference to a secrets in the same namespace where + a ServiceAccount is created by the operator to use for pulling + any images in pods that reference such ServiceAccount. + properties: + name: + type: string + description: The name of the referenced Secret. + allowedNamespaces: + type: array + description: 'Section to configure allowed namespaces that the operator + is allowed to use. If empty all namespaces will be allowed (default). + + + > This value can only be set in operator helm chart or with the + environment variable `ALLOWED_NAMESPACES`. + + > It is set by OLM when [scoping the operator](https://olm.operatorframework.io/docs/advanced-tasks/operator-scoping-with-operatorgroups/). + + ' + items: + type: string + description: 'A namespace that the operator is allowed to use. + + ' + allowedNamespaceLabelSelector: + type: object + description: 'Section to configure namespaces that the operator + is allowed to use. If allowedNamespaces is defined it will be + used instead. If empty all namespaces will be allowed (default). + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#labelselector-v1-meta + + + > This value can only be set in operator helm chart. + + ' + additionalProperties: + type: string + disableClusterRole: + type: boolean + description: "When set to `true` the creation of the operator ClusterRole\ + \ and ClusterRoleBinding is disabled.\n Also, when `true`, some\ + \ features that rely on unnamespaced resources premissions will\ + \ be disabled:\n\n* Creation and upgrade of CustomResourceDefinitions\n\ + * Set CA bundle for Webhooks\n* Check existence of CustomResourceDefinition\ + \ when listing custom resources\n* Validation of StorageClass\n\ + * REST API endpoint `can-i/{verb}/{resource}` and `can-i` will\ + \ always return the full list of permissions for any resource\ + \ and verb since they rely on creation of subjectaccessreviews\ + \ unnamespaced resource that requires a cluster role.\n* Other\ + \ REST API endpoints will not work since they rely on impersonation\ + \ that requires a cluster role.\n This point in particular breaks\ + \ the Web Console completely. You may still enable this specific\ + \ cluster role with `.allowImpersonationForRestApi`.\n If you\ + \ do not need the Web Console you may still disable it completely\ + \ by setting `.deploy.restapi` to `false`.\n\nWhen set to `true`\ + \ and `allowedNamespaces` is not set or is empty then `allowedNamespaces`\ + \ will be considered set and containing only the namespace of\ + \ the operator.\n\nIt is `false` by default.\n\n> This value can\ + \ only be set in operator helm chart.\n" + allowImpersonationForRestApi: + type: boolean + description: 'When set to `true` the cluster role for impersonation + will be created even if `disableClusterRole` is set to `true`. + + + It is `false` by default. + + + > This value can only be set in operator helm chart. + + ' + disableCrdsAndWebhooksUpdate: + type: boolean + description: 'When set to `true` the cluster role to update or patch + CRDs will be disabled. + + + It is `false` by default. + + + > This value can only be set in operator helm chart. + + ' + sgConfigNamespace: + type: string + description: 'When set will indicate the namespace where the SGConfig + used by the operator will be created. + + + By default the SGConfig will be created in the same namespace + as the operator. 
+ + + > This value can only be set in operator helm chart. + + ' + serviceAccount: + type: object + description: Section to configure Operator Installation ServiceAccount + properties: + create: + type: boolean + default: true + description: 'If `true` the Operator Installation ServiceAccount + will be created + + + > This value can only be set in operator helm chart. + + ' + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: Section to configure Installation ServiceAccount + annotations + repoCredentials: + type: array + description: 'Repositories credentials Secret names + + + > This value can only be set in operator helm chart. + + ' + items: + type: string + description: 'Repository credentials Secret name + + + > This value can only be set in operator helm chart. + + ' + operator: + type: object + description: Section to configure Operator Pod + properties: + image: + type: object + description: Section to configure Operator image + properties: + name: + type: string + default: stackgres/operator + description: 'Operator image name + + + > This value can only be set in operator helm chart. + + ' + tag: + type: string + description: 'Operator image tag + + + > This value can only be set in operator helm chart. + + ' + pullPolicy: + type: string + default: IfNotPresent + description: 'Operator image pull policy + + + > This value can only be set in operator helm chart. + + ' + annotations: + type: object + description: Operator Pod annotations + x-kubernetes-preserve-unknown-fields: true + resources: + type: object + description: 'Operator Pod resources. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#resourcerequirements-v1-core + + + > This value can only be set in operator helm chart. + + ' + x-kubernetes-preserve-unknown-fields: true + nodeSelector: + type: object + x-kubernetes-preserve-unknown-fields: true + description: 'Operator Pod node selector + + + > This value can only be set in operator helm chart. + + ' + tolerations: + type: array + description: 'Operator Pod tolerations. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core + + + > This value can only be set in operator helm chart. + + ' + items: + type: object + x-kubernetes-preserve-unknown-fields: true + affinity: + type: object + x-kubernetes-preserve-unknown-fields: true + description: 'Operator Pod affinity. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#affinity-v1-core + + + > This value can only be set in operator helm chart. + + ' + serviceAccount: + type: object + description: Section to configure Operator ServiceAccount + properties: + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: 'Section to configure Operator ServiceAccount + annotations + + + > This value can only be set in operator helm chart. + + ' + repoCredentials: + type: array + description: 'Repositories credentials Secret names + + + > This value can only be set in operator helm chart. + + ' + items: + type: string + service: + type: object + description: Section to configure Operator Service + properties: + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: 'Section to configure Operator Service annotations + + + > This value can only be set in operator helm chart. 
+ + ' + restapi: + type: object + description: Section to configure REST API Pod + properties: + name: + type: string + default: stackgres-restapi + description: REST API Deployment name + image: + type: object + description: Section to configure REST API image + properties: + name: + type: string + default: stackgres/restapi + description: REST API image name + tag: + type: string + description: REST API image tag + pullPolicy: + type: string + default: IfNotPresent + description: REST API image pull policy + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: REST API Pod annotations + resources: + type: object + x-kubernetes-preserve-unknown-fields: true + description: REST API Pod resources. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#resourcerequirements-v1-core + nodeSelector: + type: object + x-kubernetes-preserve-unknown-fields: true + description: REST API Pod node selector + tolerations: + type: array + description: REST API Pod tolerations. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + affinity: + type: object + x-kubernetes-preserve-unknown-fields: true + description: REST API Pod affinity. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#affinity-v1-core + serviceAccount: + type: object + description: Section to configure REST API ServiceAccount + properties: + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: REST API ServiceAccount annotations + repoCredentials: + type: array + description: Repositories credentials Secret names + items: + type: string + description: Repository credentials Secret name + service: + type: object + description: Section to configure REST API Service + properties: + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: REST API Service annotations + adminui: + type: object + description: Section to configure Web Console container + properties: + image: + type: object + description: Section to configure Web Console image + properties: + name: + type: string + default: stackgres/admin-ui + description: Web Console image name + tag: + type: string + description: Web Console image tag + pullPolicy: + type: string + default: IfNotPresent + description: Web Console image pull policy + resources: + type: object + x-kubernetes-preserve-unknown-fields: true + description: Web Console resources. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#resourcerequirements-v1-core + service: + type: object + description: Section to configure Web Console service. + properties: + exposeHTTP: + type: boolean + default: false + description: When set to `true` the HTTP port will be exposed + in the Web Console Service + type: + type: string + default: ClusterIP + description: "The type used for the service of the UI:\n\ + * Set to LoadBalancer to create a load balancer (if supported\ + \ by the kubernetes cluster)\n to allow connect from\ + \ Internet to the UI. Note that enabling this feature\ + \ will probably incurr in\n some fee that depend on the\ + \ host of the kubernetes cluster (for example this is\ + \ true for EKS, GKE\n and AKS).\n* Set to NodePort to\ + \ expose admin UI from kubernetes nodes.\n" + loadBalancerIP: + type: string + description: 'LoadBalancer will get created with the IP + specified in + + this field. 
This feature depends on whether the underlying + cloud-provider supports specifying + + the loadBalancerIP when a load balancer is created. This + field will be ignored if the + + cloud-provider does not support the feature. + + ' + loadBalancerSourceRanges: + type: array + description: 'If specified and supported by the platform, + + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the + + specified client IPs. This field will be ignored if the + cloud-provider does not support the + + feature. + + More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ + + ' + items: + type: string + nodePort: + type: integer + description: The HTTPS port used to expose the Service on + Kubernetes nodes + nodePortHTTP: + type: integer + description: The HTTP port used to expose the Service on + Kubernetes nodes + collector: + type: object + description: "Section to configure OpenTelemetry Collector\n\nBy\ + \ default a single instance of OpenTelemetry Collector will receive\ + \ metrics\n from all monitored Pods and will then exports those\ + \ metrics to\n a configured target (by default will expose a Prometheus\ + \ exporter).\n\nSee receivers section to scale this architecture\ + \ to a set of OpenTelemetry Collectors.\n" + default: + service: + spec: + type: ClusterIP + ports: + - name: prom-http + protocol: TCP + port: 9464 + targetPort: prom-http + ports: + - name: prom-http + protocol: TCP + containerPort: 9464 + config: + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + tls: + ca_file: /etc/operator/certs/tls.crt + cert_file: /etc/operator/certs/tls.crt + key_file: /etc/operator/certs/tls.key + exporters: + prometheus: + endpoint: 0.0.0.0:9464 + tls: + ca_file: /etc/operator/certs/tls.crt + cert_file: /etc/operator/certs/tls.crt + key_file: /etc/operator/certs/tls.key + reload_interval: 10m + send_timestamps: true + metric_expiration: 180m + enable_open_metrics: false + resource_to_telemetry_conversion: + enabled: false + otlp: + endpoint: stackgres-collector:4317 + tls: + ca_file: /etc/operator/certs/tls.crt + service: + pipelines: + metrics: + receivers: + - prometheus + exporters: + - prometheus + prometheusOperator: + allowDiscovery: true + properties: + name: + type: string + default: stackgres-collector + description: OpenTelemetry Collector Deploymnet/Deamonset base + name + receivers: + type: object + description: "This section allow to configure a variable number\ + \ of OpenTelemetry Collector\n receivers (by default equals\ + \ to the number of Pod with metrics enabled)\n that will scrape\ + \ the metrics separately and send them to a defined number\n\ + \ of OpenTelemetry Collector exporters (by default 1) that\ + \ exports those metrics\n to one or more configured targets\ + \ (by default will expose a Prometheus exporter).\n" + properties: + enabled: + type: boolean + description: "When set to `true` it enables the creation\ + \ of a set of OpenTelemetry Collectors receivers\n that\ + \ will be scraping from the SGCluster Pods and allow to\ + \ scale the observability\n architecture and a set of\ + \ OpenTelemetry Collectors exporters that exports those\ + \ metrics\n to one or more configured targets.\n" + default: false + exporters: + type: integer + description: "When receivers are enabled indicates the number\ + \ of OpenTelemetry Collectors exporters that\n exports\ + \ metrics to one or more configured targets.\n" + default: 1 + deployments: + type: array + 
description: "A set of separate Deployments of 1 instance\ + \ each that allow to set the OpenTelemetry Collectors\ + \ receivers to a specified number of instances.\n\nWhen\ + \ not set the number of Deployment of OpenTelemetry Collectors\ + \ receivers will match the number of instances of all\ + \ the existing SGClusters\n that has the field `.spec.configurations.observability.enableMetrics`\ + \ set to `true`. Also, when not set, each Deployment will\ + \ include a pod\n affinity rule matching any of the SGClusters\ + \ Pods set defined below. This will allow to create an\ + \ OpenTelemetry Collector receiver instance\n dedicated\ + \ to each SGCluster Pod running in the same Node.\n\n\ + Each Deployment will use a configuration for the OpenTelemetry\ + \ Collector that will scrape from a set of SGClusters\ + \ Pods that has the field\n `.spec.configurations.observability.enableMetrics`\ + \ set to `true`. The set of Pods of each of those OpenTelemetry\ + \ Collector configuration\n will be a partition of the\ + \ list of SGClusters Pods that has the field `.spec.configurations.observability.enableMetrics`\ + \ set to `true`\n ordered by the field `Pod.metadata.creationTimestamp`\ + \ (from the oldest to the newest) and ordered crescently\ + \ alphabetically by the fields\n `Pod.metadata.namespace`\ + \ and `Pod.metadata.name`.\n\nIf is possible to override\ + \ (even partially) the list of SGCluster Pods using the\ + \ `sgClusters` section.\n" + items: + type: object + properties: + sgClusters: + type: array + description: "List of SGCluster Pods to scrape from\ + \ this Deployment's Pod that will be included to\ + \ the OpenTelemetry Collector\n configuration alongside\ + \ the SGCluster Pods assigned as described in `SGConfig.spec.collector.receivers.deployments`.\n" + items: + type: object + properties: + namespace: + type: string + description: The namespace of the SGCluster + name: + type: string + description: The name of the SGCluster + indexes: + type: array + description: "The indexes of the SGCluster's\ + \ Pods that will be included to the OpenTelemetry\ + \ Collector configuration alongside\n the\ + \ SGCluster Pods assigned as described in\ + \ `SGConfig.spec.collector.receivers.deployments`.\n\ + \nIf not specified all the SGCluster's Pods\ + \ will be included.\n" + items: + type: integer + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector Pod annotations + resources: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector Pod resources. + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#resourcerequirements-v1-core + nodeSelector: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector Pod node selector + tolerations: + type: array + description: OpenTelemetry Collector Pod tolerations. + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + affinity: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector Pod affinity. 
+ See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#affinity-v1-core + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector Pod annotations + resources: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector Pod resources. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#resourcerequirements-v1-core + nodeSelector: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector Pod node selector + tolerations: + type: array + description: OpenTelemetry Collector Pod tolerations. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + affinity: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector Pod affinity. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#affinity-v1-core + serviceAccount: + type: object + description: Section to configure OpenTelemetry Collector ServiceAccount + properties: + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector ServiceAccount annotations + repoCredentials: + type: array + description: Repositories credentials Secret names + items: + type: string + description: Repository credentials Secret name + service: + type: object + description: Section to configure OpenTelemetry Collector Service + properties: + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: OpenTelemetry Collector Service annotations + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + description: Section to configure OpenTelemetry Collector + Service specs. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#servicespec-v1-core + ports: + type: array + description: Section to configure OpenTelemetry Collector ports. + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#containerport-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + volumeMounts: + type: array + description: Section to configure OpenTelemetry Collector Volume + Mounts. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + volumes: + type: array + description: Section to configure OpenTelemetry Collector Volumes. + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + config: + type: object + x-kubernetes-preserve-unknown-fields: true + description: Section to configure OpenTelemetry Collector Configuration. + See https://opentelemetry.io/docs/collector/configuration + prometheusOperator: + type: object + description: Section to configure OpenTelemetry Collector integration + with Prometheus Operator. 
+ properties: + allowDiscovery: + type: boolean + default: true + description: "If set to false or monitors is set automatic\ + \ bind to Prometheus\n created using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)\ + \ will be disabled.\n\nIf disabled the cluster will not\ + \ be binded to Prometheus automatically and will require\ + \ manual configuration.\n\nWill be ignored if monitors\ + \ is set.\n" + monitors: + type: array + description: "Optional section to configure PodMonitors\ + \ for specific Prometheus instances\n\n*WARNING*: resources\ + \ created by this integration that does set\n the metadata\ + \ namespace to the same as the operator will not\n be\ + \ removed when removing the helm chart. Changing the namespace\n\ + \ may require configure the Prometheus CR properly in\ + \ order to\n discover PodMonitor in such namespace.\n" + items: + type: object + description: Section to configure a PodMonitor for a specific + Prometheus instance that will scrape from the collector + Pod pointing by default to the prometheus exporter + properties: + name: + type: string + description: The name of the Prometheus resource that + will scrape from the collector Pod pointing by default + to the prometheus exporter + namespace: + type: string + description: The namespace of the Prometheus resource + that will scrape from the collector Pod pointing + by default to the prometheus exporter + metadata: + type: object + description: Section to overwrite some PodMonitor + metadata + properties: + name: + type: string + description: The name of the PodMonitor + namespace: + type: string + description: The namespace of the PodMonitor. + Changing the namespace may require configure + the Prometheus CR properly in order to discover + PodMonitor in such namespace. + labels: + type: object + x-kubernetes-preserve-unknown-fields: true + description: The labels to set for the PodMonitor + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: The labels to set for the PodMonitor + ownerReferences: + type: array + description: The ownerReferences to set for the + PodMonitor in order to be garbage collected + by the specified object. + items: + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + description: The PodMonitor spec that will be overwritten + by the operator. See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.PodMonitorSpec + jobs: + type: object + description: Section to configure Operator Installation Jobs + properties: + image: + type: object + description: Section to configure Operator Installation Jobs + image + properties: + name: + type: string + default: stackgres/jobs + description: Operator Installation Jobs image name + tag: + type: string + description: Operator Installation Jobs image tag + pullPolicy: + type: string + default: IfNotPresent + description: Operator Installation Jobs image pull policy + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: Operator Installation Jobs annotations + resources: + type: object + x-kubernetes-preserve-unknown-fields: true + description: Operator Installation Jobs resources. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#resourcerequirements-v1-core + nodeSelector: + type: object + x-kubernetes-preserve-unknown-fields: true + description: Operator Installation Jobs node selector + tolerations: + type: array + description: Operator Installation Jobs tolerations. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + affinity: + type: object + x-kubernetes-preserve-unknown-fields: true + description: Operator Installation Jobs affinity. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#affinity-v1-core + serviceAccount: + type: object + description: Section to configure Jobs ServiceAccount + properties: + annotations: + type: object + x-kubernetes-preserve-unknown-fields: true + description: Jobs ServiceAccount annotations + repoCredentials: + type: array + description: Repositories credentials Secret names + items: + type: string + description: Repository credentials Secret name + deploy: + type: object + description: Section to configure deployment aspects. + properties: + operator: + type: boolean + default: true + description: When set to `true` the Operator will be deployed. + restapi: + type: boolean + default: true + description: When set to `true` the Web Console / REST API will + be deployed. + collector: + type: boolean + default: true + description: When set to `true` the OpenTelemetry Collector + will be deployed. + cert: + type: object + description: Section to configure the Operator, REST API and Web + Console certificates and JWT RSA key-pair. + properties: + autoapprove: + type: boolean + default: true + description: "If set to `true` the CertificateSigningRequest\ + \ used to generate the certificate used by\n Webhooks will\ + \ be approved by the Operator Installation Job.\n" + createForOperator: + type: boolean + default: true + description: When set to `true` the Operator certificate will + be created. + createForWebApi: + type: boolean + default: true + description: When set to `true` the Web Console / REST API certificate + will be created. + createForCollector: + type: boolean + default: true + description: When set to `true` the OpenTelemetry Collector + certificate will be created. + secretName: + type: string + description: "The Secret name with the Operator Webhooks certificate\ + \ issued by the Kubernetes cluster CA\n of type kubernetes.io/tls.\ + \ See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets\n" + regenerateCert: + type: boolean + description: 'When set to `true` the Operator certificates will + be regenerated if `createForOperator` is set to `true`, and + the certificate is expired or invalid. + + ' + default: true + certDuration: + type: integer + description: 'The duration in days of the generated certificate + for the Operator after which it will expire and be regenerated. + + If not specified it will be set to 730 (2 years) by default. + + ' + webSecretName: + type: string + description: "The Secret name with the Web Console / REST API\ + \ certificate\n of type kubernetes.io/tls. See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets\n" + regenerateWebCert: + type: boolean + description: 'When set to `true` the Web Console / REST API + certificates will be regenerated if `createForWebApi` is set + to `true`, and the certificate is expired or invalid. 
+ + ' + default: true + regenerateWebRsa: + type: boolean + description: 'When set to `true` the Web Console / REST API + RSA key pair will be regenerated if `createForWebApi` is set + to `true`, and the certificate is expired or invalid. + + ' + default: true + webCertDuration: + type: integer + description: 'The duration in days of the generated certificate + for the Web Console / REST API after which it will expire + and be regenerated. + + If not specified it will be set to 730 (2 years) by default. + + ' + webRsaDuration: + type: integer + description: 'The duration in days of the generated RSA key + pair for the Web Console / REST API after which it will expire + and be regenerated. + + If not specified it will be set to 730 (2 years) by default. + + ' + collectorSecretName: + type: string + description: "The Secret name with the OpenTelemetry Collector\ + \ certificate\n of type kubernetes.io/tls. See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets\n" + regenerateCollectorCert: + type: boolean + description: 'When set to `true` the OpenTelemetry Collector + certificates will be regenerated if `createForCollector` is + set to `true`, and the certificate is expired or invalid. + + ' + default: true + collectorCertDuration: + type: integer + description: 'The duration in days of the generated certificate + for the OpenTelemetry Collector after which it will expire + and be regenerated. + + If not specified it will be set to 730 (2 years) by default. + + ' + certManager: + type: object + description: Section to configure cert-manager integration to + generate Operator certificates + properties: + autoConfigure: + type: boolean + default: false + description: "When set to `true` then Issuer and Certificate\ + \ for Operator, Web Console / REST API and OpenTelemetry\ + \ Collector\n Pods will be generated\n" + duration: + type: string + default: 2160h + description: The requested duration (i.e. lifetime) of the + Certificates. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io%2fv1 + renewBefore: + type: string + default: 360h + description: How long before the currently issued certificate’s + expiry cert-manager should renew the certificate. See + https://cert-manager.io/docs/reference/api-docs/#cert-manager.io%2fv1 + encoding: + type: string + default: PKCS1 + description: The private key cryptography standards (PKCS) + encoding for this certificate’s private key to be encoded + in. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey + size: + type: integer + default: 2048 + description: Size is the key bit size of the corresponding + private key for this certificate. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey + rbac: + type: object + description: Section to configure RBAC for Web Console admin user + properties: + create: + type: boolean + default: true + description: "When set to `true` the admin user is assigned\ + \ the `cluster-admin` ClusterRole by creating\n ClusterRoleBinding.\n" + authentication: + type: object + description: Section to configure Web Console authentication + properties: + type: + type: string + default: jwt + description: "Specify the authentication mechanism to use. 
By\ + \ default is `jwt`, see https://stackgres.io/doc/latest/api/rbac#local-secret-mechanism.\n\ + \ If set to `oidc` then see https://stackgres.io/doc/latest/api/rbac/#openid-connect-provider-mechanism.\n" + createAdminSecret: + type: boolean + description: 'When `true` will create the secret used to store + the admin user credentials to access the UI. + + ' + default: true + user: + type: string + default: admin + description: 'The admin username that will be created for the + Web Console + + + Operator bundle installation can not change the default value + of this field. + + ' + password: + type: string + description: 'The admin password that will be created for the + Web Console. + + + If not specified a random password will be generated. + + ' + secretRef: + type: object + description: 'Allow to specify a reference to a Secret with + the admin user credentials for the Web Console. + + + In order to assign properly permissions. Make sure the `user` + field match the value of the `k8sUsername` key in the referenced + Secret. + + ' + properties: + name: + description: The name of the Secret. + type: string + oidc: + type: object + description: Section to configure Web Console OIDC authentication + properties: + tlsVerification: + type: string + description: Can be one of `required`, `certificate-validation` + or `none` + authServerUrl: + type: string + clientId: + type: string + credentialsSecret: + type: string + clientIdSecretRef: + type: object + properties: + name: + type: string + key: + type: string + credentialsSecretSecretRef: + type: object + properties: + name: + type: string + key: + type: string + prometheus: + type: object + description: Section to configure Prometheus integration. + properties: + allowAutobind: + type: boolean + default: true + description: "If set to false disable automatic bind to Prometheus\n\ + \ created using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator).\n\ + If disabled the cluster will not be binded to Prometheus automatically\ + \ and will require manual\n intervention by the Kubernetes\ + \ cluster administrator.\n" + grafana: + type: object + description: Section to configure Grafana integration + properties: + autoEmbed: + type: boolean + default: false + description: "When set to `true` embed automatically Grafana\ + \ into the Web Console by creating the\n StackGres dashboard\ + \ and the read-only role used to read it from the Web Console\ + \ \n" + schema: + type: string + default: http + description: "The schema to access Grafana. By default http.\ + \ (used to embed manually and\n automatically grafana)\n" + webHost: + type: string + description: "The service host name to access grafana (used\ + \ to embed manually and\n automatically Grafana). \nThe parameter\ + \ value should point to the grafana service following the\ + \ \n [DNS reference](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/)\ + \ `svc_name.namespace`\n" + datasourceName: + type: string + default: Prometheus + description: The datasource name used to create the StackGres + Dashboard into Grafana + user: + type: string + default: admin + description: "The username to access Grafana. By default admin.\ + \ (used to embed automatically\n Grafana)\n" + password: + type: string + default: prom-operator + description: "The password to access Grafana. 
By default prom-operator\ + \ (the default in for\n kube-prometheus-stack helm chart).\ + \ (used to embed automatically Grafana)\n" + secretNamespace: + type: string + description: "The namespace of secret with credentials to access\ + \ Grafana. (used to\n embed automatically Grafana, alternative\ + \ to use `user` and `password`)\n" + secretName: + type: string + description: "The name of secret with credentials to access\ + \ Grafana. (used to embed\n automatically Grafana, alternative\ + \ to use `user` and `password`)\n" + secretUserKey: + type: string + description: "The key of secret with username used to access\ + \ Grafana. (used to embed\n automatically Grafana, alternative\ + \ to use `user` and `password`)\n" + secretPasswordKey: + type: string + description: "The key of secret with password used to access\ + \ Grafana. (used to\n embed automatically Grafana, alternative\ + \ to use `user` and `password`)\n" + dashboardConfigMap: + type: string + description: "The ConfigMap name with the dashboard JSON in\ + \ the key `grafana-dashboard.json`\n that will be created\ + \ in Grafana. If not set the default\n" + dashboardId: + type: string + description: "The dashboard id that will be create in Grafana\n\ + \ (see https://grafana.com/grafana/dashboards). By default\ + \ 9628. (used to embed automatically\n Grafana)\n\nManual\ + \ Steps:\n \nCreate grafana dashboard for postgres exporter\ + \ and copy/paste share URL:\n- Grafana > Create > Import >\ + \ Grafana.com Dashboard 9628\nCopy/paste grafana dashboard\ + \ URL for postgres exporter:\n- Grafana > Dashboard > Manage\ + \ > Select postgres exporter dashboard > Copy URL\n" + url: + type: string + description: "The URL of the PostgreSQL dashboard created in\ + \ Grafana (used to embed manually\n Grafana)\n" + token: + type: string + description: "The Grafana API token to access the PostgreSQL\ + \ dashboard created\n in Grafana (used to embed manually Grafana)\n\ + \nManual Steps:\n \nCreate and copy/paste grafana API token:\n\ + - Grafana > Configuration > API Keys > Add API key (for viewer)\ + \ > Copy key value\n" + extensions: + type: object + description: Section to configure extensions + properties: + repositoryUrls: + type: array + default: + - https://extensions.stackgres.io/postgres/repository + description: "A list of extensions repository URLs used to retrieve\ + \ extensions\n\nTo set a proxy for extensions repository add\ + \ parameter proxyUrl to the URL:\n `https://extensions.stackgres.io/postgres/repository?proxyUrl=%3A%2F%2F[%3A]` (URL encoded)\n\ + \nOther URL parameters are:\n\n* `skipHostnameVerification`:\ + \ set it to `true` in order to use a server or a proxy with\ + \ a self signed certificate\n* `retry`: set it to `[:]` in order to retry a request on failure\n\ + * `setHttpScheme`: set it to `true` in order to force using\ + \ HTTP scheme\n" + items: + type: string + cache: + type: object + description: "Section to configure extensions cache (experimental).\n\ + \nThis feature is in beta and may cause failures, please use\ + \ with caution and report any\n error to https://gitlab.com/ongresinc/stackgres/-/issues/new\n" + properties: + enabled: + type: boolean + default: false + description: "When set to `true` enable the extensions cache.\n\ + \nThis feature is in beta and may cause failures, please\ + \ use with caution and report any\n error to https://gitlab.com/ongresinc/stackgres/-/issues/new\n" + preloadedExtensions: + type: array + default: + - x86_64/linux/timescaledb-1\.7\.4-pg12 + 
description: An array of extension patterns used to pre-load + extensions into the extensions cache + items: + type: string + description: An extension pattern used to pre-load extensions + into the extensions cache + persistentVolume: + type: object + description: Section to configure the extensions cache PersistentVolume + properties: + size: + type: string + default: 1Gi + description: 'The PersistentVolume size for the extensions + cache + + + Only use whole numbers (e.g. not 1e6) and K/Ki/M/Mi/G/Gi + as units + + ' + storageClass: + type: string + description: "If defined set storage class\nIf set to\ + \ \"-\" (equivalent to storageClass: \"\" in a PV\ + \ spec) disables\n dynamic provisioning\nIf undefined\ + \ (the default) or set to null, no storageClass spec\ + \ is\n set, choosing the default provisioner. (gp2\ + \ on AWS, standard on\n GKE, AWS & OpenStack)\n" + hostPath: + type: string + description: "If set, will use a host path volume with the\ + \ specified path for the extensions cache\n instead of\ + \ a PersistentVolume\n" + shardingSphere: + type: object + description: Section to configure integration with ShardingSphere + operator + properties: + serviceAccount: + type: object + description: "Section to configure ServiceAccount used by ShardingSphere\ + \ operator.\n\nYou may configure a specific value for a sharded\ + \ cluster under section\n `SGShardedCluster.spec.coordinator.configurations.shardingSphere.serviceAccount`.\n" + required: + - namespace + - name + properties: + namespace: + type: string + description: The namespace of the ServiceAccount used by + ShardingSphere operator + name: + type: string + description: The name of the ServiceAccount used by ShardingSphere + operator + developer: + type: object + x-kubernetes-preserve-unknown-fields: true + description: 'Section to configure developer options. + + + Following options are for developers only, but can also be useful + in some cases ;) + + ' + properties: + version: + type: string + description: Set the operator version (used for testing) + logLevel: + type: string + description: Set `quarkus.log.level`. See https://quarkus.io/guides/logging#root-logger-configuration + showDebug: + type: boolean + default: false + description: If set to `true` adds extra debug to any script + controlled by the reconciliation cycle of the operator configuration + showStackTraces: + type: boolean + default: false + description: Set `quarkus.log.console.format` to `%d{yyyy-MM-dd + HH:mm:ss,SSS} %-5p [%c{4.}] (%t) %s%e%n`.
See https://quarkus.io/guides/logging#logging-format + useJvmImages: + type: boolean + default: false + description: 'The operator will use JVM version of the images + + ' + enableJvmDebug: + type: boolean + default: false + description: "Only work with JVM version and allow connect\n\ + \ on port 8000 of operator Pod with jdb or similar\n" + enableJvmDebugSuspend: + type: boolean + default: false + description: "Only work with JVM version and if `enableJvmDebug`\ + \ is `true`\n suspend the JVM until a debugger session is\ + \ started\n" + externalOperatorIp: + type: string + description: Set the external Operator IP + externalOperatorPort: + type: integer + description: Set the external Operator port + externalRestApiIp: + type: string + description: Set the external REST API IP + externalRestApiPort: + type: integer + description: Set the external REST API port + allowPullExtensionsFromImageRepository: + type: boolean + default: false + description: "If set to `true` and `extensions.cache.enabled`\ + \ is also `true`\n it will try to download extensions from\ + \ images (experimental)\n" + disableArbitraryUser: + type: boolean + default: false + description: 'It set to `true` disable arbitrary user that is + set for OpenShift clusters + + ' + patches: + type: object + description: 'Section to define patches for some StackGres Pods + + ' + properties: + operator: + type: object + description: 'Section to define volumes to be used by the + operator container + + ' + properties: + volumes: + type: array + description: Pod volumes. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + volumeMounts: + type: array + description: Pod's container volume mounts. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + restapi: + type: object + description: 'Section to define volumes to be used by the + restapi container + + ' + properties: + volumes: + type: array + description: Pod volumes. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + volumeMounts: + type: array + description: Pod's container volume mounts. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + adminui: + type: object + description: 'Section to define volumes to be used by the + adminui container + + ' + properties: + volumes: + type: array + description: Pod volumes. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + volumeMounts: + type: array + description: Pod's container volume mounts. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + jobs: + type: object + description: 'Section to define volumes to be used by the + jobs container + + ' + properties: + volumes: + type: array + description: Pod volumes. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + volumeMounts: + type: array + description: Pod's container volume mounts. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + clusterController: + type: object + description: 'Section to define volumes to be used by the + cluster controller container + + ' + properties: + volumes: + type: array + description: Pod volumes. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + volumeMounts: + type: array + description: Pod's container volume mounts. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + stream: + type: object + description: 'Section to define volumes to be used by the + stream container + + ' + properties: + volumes: + type: array + description: Pod volumes. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + volumeMounts: + type: array + description: Pod's container volume mounts. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core + items: + type: object + x-kubernetes-preserve-unknown-fields: true + status: + type: object + description: Status defines the observed state of SGConfig + x-kubernetes-preserve-unknown-fields: true + properties: + conditions: + type: array + items: + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one + status to another. + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, + Unknown. + type: string + type: + description: Type of deployment condition. 
+ type: string + version: + type: string + description: Latest version of the operator used to check for updates + removeOldOperatorBundleResources: + type: boolean + description: Indicate when the old operator bundle resources has + been removed + grafana: + type: object + properties: + urls: + description: Grafana URLs to StackGres dashboards + type: array + items: + type: string + description: Grafana URL to StackGres dashboards preceded + by the dashboard name and a semicolon `:` + token: + description: Grafana Token that allow to access dashboards + type: string + configHash: + description: Grafana configuration hash + type: string + existingCrUpdatedToVersion: + type: string + description: Indicate the version to which existing CRs have been + updated to diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgdbops.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgdbops.yaml new file mode 100644 index 00000000000..b7483dc4012 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgdbops.yaml @@ -0,0 +1,2607 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgdbops.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGDbOps + listKind: SGDbOpsList + plural: sgdbops + singular: sgdbops + shortNames: + - sgdo + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: cluster + type: string + jsonPath: .spec.sgCluster + - name: operation + type: string + jsonPath: .spec.op + - name: status + type: string + jsonPath: .status.conditions[?(@.status=="True")].reason + - name: started-at + type: string + jsonPath: .status.opStarted + priority: 1 + - name: retries + type: string + jsonPath: .status.opRetries + priority: 1 + schema: + openAPIV3Schema: + required: + - metadata + - spec + type: object + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 57 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + description: 'Name of the Database Operation. A database operation + represents a ""kind"" of operation on a StackGres cluster, classified + by a given name. The operation reference one SGCluster by its + name. Following [Kubernetes naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 `label`, an alphanumeric (a-z, and + 0-9) string, with the ''-'' character allowed anywhere except + the first or last character. + + + The name must be unique across all database operations in the + same namespace." + + ' + spec: + type: object + properties: + sgCluster: + type: string + description: 'The name of SGCluster on which the operation will + be performed. + + ' + scheduling: + type: object + description: Pod custom node scheduling and affinity configuration + properties: + nodeSelector: + type: object + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the pod to fit on a node. Selector which must match a + node''s labels for the pod to be scheduled on that node. More + info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + ' + tolerations: + description: 'If specified, the pod''s tolerations. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core' + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . 
+ properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + nodeAffinity: + description: 'Node affinity is a group of node affinity scheduling + rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the node + matches the corresponding matchExpressions; the node(s) + with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). + properties: + preference: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. + The TopologySelectorTerm type implements a subset + of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the union of the + results of one or more label queries over a set of nodes; + that is, it represents the OR of the selectors represented + by the node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. + The TopologySelectorTerm type implements a subset + of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. 
If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which + indicate the highest priorities with the former being the + highest priority. Any other name must be defined by creating + a PriorityClass object with that name. If not specified, the + pod priority will be default or zero if there is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the node + has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". 
The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not + be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to a pod label update), + the system may or may not try to eventually evict the + pod from its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm are + intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. 
+ type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key in (value)` to select the + group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MatchLabelKeys and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key notin (value)` to select + the group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of + namespace names that the term applies to. The term + is applied to the union of the namespaces listed + in this field and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter pod anti + affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the + sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. 
+ type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the pod + will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met at + some point during pod execution (e.g. due to a pod label + update), the system may or may not try to eventually evict + the pod from its node. When there are multiple elements, + the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. 
+ The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key in (value)` to select the + group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MatchLabelKeys and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key notin (value)` to select + the group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of + namespace names that the term applies to. The term + is applied to the union of the namespaces listed + in this field and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + op: + type: string + description: 'The kind of operation that will be performed on the + SGCluster. Available operations are: + + + * `benchmark`: run a benchmark on the specified SGCluster and + report the results in the status. + + * `vacuum`: perform a [vacuum](https://www.postgresql.org/docs/current/sql-vacuum.html) + operation on the specified SGCluster. + + * `repack`: run [`pg_repack`](https://github.com/reorg/pg_repack) + command on the specified SGCluster. + + * `majorVersionUpgrade`: perform a major version upgrade of PostgreSQL + using [`pg_upgrade`](https://www.postgresql.org/docs/current/pgupgrade.html) + command. + + * `restart`: perform a restart of the cluster. + + * `minorVersionUpgrade`: perform a minor version upgrade of PostgreSQL. + + * `securityUpgrade`: perform a security upgrade of the cluster. + + ' + runAt: + type: string + description: 'An ISO 8601 date, that holds UTC scheduled date of + the operation execution. + + + If not specified or if the date it''s in the past, it will be + interpreted ASAP. + + ' + timeout: + type: string + description: 'An ISO 8601 duration in the format `PnDTnHnMn.nS`, + that specifies a timeout after which the operation execution will + be canceled. + + + If the operation can not be performed due to timeout expiration, + the condition `Failed` will have a status of `True` and the reason + will be `OperationTimedOut`. + + + If not specified the operation will never fail for timeout expiration. + + ' + maxRetries: + type: integer + description: 'The maximum number of retries the operation is allowed + to do after a failure. + + + A value of `0` (zero) means no retries are made. Defaults to: + `0`. + + ' + benchmark: + type: object + description: 'Configuration of the benchmark + + ' + properties: + type: + type: string + description: 'The type of benchmark that will be performed on + the SGCluster. Available benchmarks are: + + + * `pgbench`: run [pgbench](https://www.postgresql.org/docs/current/pgbench.html) + on the specified SGCluster and report the results in the status. + + * `sampling`: samples real queries and store them in the SGDbOps + status in order to be used by a `pgbench` benchmark using + `replay` mode. + + ' + database: + type: string + description: 'When specified will indicate the database where + the benchmark will run upon. + + + If not specified a target database with a random name will + be created and removed after the benchmark completes. + + ' + credentials: + type: object + description: The credentials of the user that will be used by + the benchmark + required: + - username + - password + properties: + username: + type: object + description: 'The username that will be used by the benchmark. + + + If not specified the default superuser username (by default + postgres) will be used. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The Secret name where the username is + stored. + + ' + key: + type: string + description: 'The Secret key where the username is stored. 
+ + ' + password: + type: object + description: 'The password that will be used by the benchmark + + + If not specified the default superuser password will be + used. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The Secret name where the password is + stored. + + ' + key: + type: string + description: 'The Secret key where the password is stored. + + ' + sampling: + type: object + description: 'Configuration of sampling benchmark. + + ' + required: + - targetDatabase + - topQueriesCollectDuration + - samplingDuration + properties: + targetDatabase: + type: string + description: 'The target database to be sampled. By default + `postgres`. + + + The benchmark database will be used to store the sampled + queries but user must specify a target database to be + sampled in the `sampling` section. + + ' + topQueriesCollectDuration: + type: string + description: An ISO 8601 duration in the format `PnDTnHnMn.nS`, + that specifies how long the to wait before selecting top + queries in order to collect enough stats. + samplingDuration: + type: string + description: An ISO 8601 duration in the format `PnDTnHnMn.nS`, + that specifies how long will last the sampling of real + queries that will be replayed later. + mode: + type: string + description: 'The mode used to select the top queries used + for sampling: + + + * `time`: The top queries will be selected among the most + slow queries. + + * `calls`: The top queries will be selected among the + most called queries. + + * `custom`: The `customTopQueriesQuery` will be used to + select top queries. + + ' + topQueriesFilter: + type: string + description: Regular expression for filtering representative + statements when selecting top queries. Will be ignored + if `mode` is set to `custom`. By default is `^ *(with|select) + `. See https://www.postgresql.org/docs/current/functions-matching.html#FUNCTIONS-POSIX-REGEXP + topQueriesPercentile: + type: integer + description: Percentile of queries to consider as part of + the top queries. Will be ignored if `mode` is set to `custom`. + By default `95`. + topQueriesMin: + type: integer + description: Minimum number of queries to consider as part + of the top queries. By default `5`. + customTopQueriesQuery: + type: string + description: 'The query used to select top queries. Will + be ignored if `mode` is not set to `custom`. + + + The query must return at most 2 columns: + + + * First column returned by the query must be a column + holding the query identifier, also available in pg_stat_activity + (column `query_id`) and pg_stat_statements (column `queryid`). + + * Second column is optional and, if returned, must hold + a json object containing only text keys and values stat + will be used to generate the stats. + + + See also: + + + * https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-ACTIVITY-VIEW + + * https://www.postgresql.org/docs/current/pgstatstatements.html#PGSTATSTATEMENTS-PG-STAT-STATEMENTS + + ' + queries: + type: integer + description: Number of sampled queries to include in the + result. By default `10`. + omitTopQueriesInStatus: + type: boolean + description: When `true` omit to include the top queries + stats in the SGDbOps status. By default `false`. + samplingMinInterval: + type: integer + description: Minimum number of microseconds the sampler + will wait between each sample is taken. By default `10000` + (10 milliseconds). 
+ pgbench: + type: object + description: 'Configuration of [pgbench](https://www.postgresql.org/docs/current/pgbench.html) + benchmark + + ' + properties: + mode: + type: string + description: 'The pgbench benchmark type: + + + * `tpcb-like`: The benchmark is inspired by the [TPC-B + benchmark](https://www.tpc.org/TPC_Documents_Latest_Versions/TPC-B_v2.0.0.pdf). + It is the default mode when `connectionType` is set to + `primary-service`. + + * `select-only`: The `tpcb-like` but only using SELECTs + commands. It is the default mode when `connectionType` + is set to `replicas-service`. + + * `custom`: will use the scripts in the `custom` section + to initialize and and run commands for the benchmark. + + * `replay`: will replay the sampled queries of a sampling + benchmark SGDbOps. If the `custom` section is specified + it will be used instead. Queries can be referenced setting + `custom.scripts.replay` to the index of the query in the + sampling benchmark SGDbOps''s status (index start from + 0). + + + See also https://www.postgresql.org/docs/current/pgbench.html#TRANSACTIONS-AND-SCRIPTS + + ' + databaseSize: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$ + description: 'Size of the database to generate. This size + is specified either in Mebibytes, Gibibytes or Tebibytes + (multiples of 2^20, 2^30 or 2^40, respectively). + + ' + duration: + type: string + description: 'An ISO 8601 duration in the format `PnDTnHnMn.nS`, + that specifies how long the benchmark will run. + + ' + usePreparedStatements: + type: boolean + description: '**Deprecated** this field is ignored, use + `queryMode` instead. + + + Use extended query protocol with prepared statements. + Defaults to: `false`. + + ' + queryMode: + type: string + description: 'Protocol to use for submitting queries to + the server: + + + * `simple`: use simple query protocol. + + * `extended`: use extended query protocol. + + * `prepared`: use extended query protocol with prepared + statements. + + + In the prepared mode, pgbench reuses the parse analysis + result starting from the second query iteration, so pgbench + runs faster than in other modes. + + + The default is `simple` query protocol. See also https://www.postgresql.org/docs/current/protocol.html + + ' + concurrentClients: + type: integer + description: 'Number of clients simulated, that is, number + of concurrent database sessions. Defaults to: `1`. + + ' + threads: + type: integer + description: 'Number of worker threads within pgbench. Using + more than one thread can be helpful on multi-CPU machines. + Clients are distributed as evenly as possible among available + threads. Default is `1`. + + ' + samplingRate: + type: number + description: 'Sampling rate, used when collecting data, + to reduce the amount of collected data. If this option + is given, only the specified fraction of transactions + are collected. 1.0 means all transactions will be logged, + 0.05 means only 5% of the transactions will be logged. + + ' + foreignKeys: + type: boolean + description: 'Create foreign key constraints between the + standard tables. (This option only take effect if `custom.initiailization` + is not specified). + + ' + unloggedTables: + type: boolean + description: 'Create all tables as unlogged tables, rather + than permanent tables. (This option only take effect if + `custom.initiailization` is not specified). + + ' + partitionMethod: + type: string + description: 'Create a partitioned pgbench_accounts table + with the specified method. Expected values are `range` + or `hash`. 
This option requires that partitions is set + to non-zero. If unspecified, default is `range`. (This + option only take effect if `custom.initiailization` is + not specified). + + ' + partitions: + type: integer + description: 'Create a partitioned pgbench_accounts table + with the specified number of partitions of nearly equal + size for the scaled number of accounts. Default is 0, + meaning no partitioning. (This option only take effect + if `custom.initiailization` is not specified). + + ' + initSteps: + type: string + description: "Perform just a selected set of the normal\ + \ initialization steps. init_steps specifies the initialization\ + \ steps to be performed, using one character per step.\ + \ Each step is invoked in the specified order. The default\ + \ is dtgvp. The available steps are:\n\n* `d` (Drop):\ + \ Drop any existing pgbench tables.\n* `t` (create Tables):\ + \ Create the tables used by the standard pgbench scenario,\ + \ namely pgbench_accounts, pgbench_branches, pgbench_history,\ + \ and pgbench_tellers.\n* `g` or `G` (Generate data, client-side\ + \ or server-side): Generate data and load it into the\ + \ standard tables, replacing any data already present.\n\ + \ With `g` (client-side data generation), data is generated\ + \ in pgbench client and then sent to the server. This\ + \ uses the client/server bandwidth extensively through\ + \ a COPY. pgbench uses the FREEZE option with version\ + \ 14 or later of PostgreSQL to speed up subsequent VACUUM,\ + \ unless partitions are enabled. Using g causes logging\ + \ to print one message every 100,000 rows while generating\ + \ data for the pgbench_accounts table.\n With `G` (server-side\ + \ data generation), only small queries are sent from the\ + \ pgbench client and then data is actually generated in\ + \ the server. No significant bandwidth is required for\ + \ this variant, but the server will do more work. Using\ + \ G causes logging not to print any progress message while\ + \ generating data.\n The default initialization behavior\ + \ uses client-side data generation (equivalent to g).\n\ + * `v` (Vacuum): Invoke VACUUM on the standard tables.\n\ + * `p` (create Primary keys): Create primary key indexes\ + \ on the standard tables.\n* `f` (create Foreign keys):\ + \ Create foreign key constraints between the standard\ + \ tables. (Note that this step is not performed by default.)\n" + fillfactor: + type: integer + description: 'Create the pgbench_accounts, pgbench_tellers + and pgbench_branches tables with the given fillfactor. + Default is 100. + + ' + noVacuum: + type: boolean + description: 'Perform no vacuuming during initialization. + (This option suppresses the `v` initialization step, even + if it was specified in `initSteps`.) + + ' + samplingSGDbOps: + type: string + description: benchmark SGDbOps of type sampling that will + be used to replay sampled queries. + custom: + type: object + description: This section allow to configure custom SQL + for initialization and scripts used by pgbench. + properties: + initialization: + type: object + description: 'The custom SQL for initialization that + will be executed in place of pgbench default initialization. + + + If not specified the default pgbench initialization + will be performed instead. + + ' + properties: + script: + type: string + description: 'Raw SQL script to execute. This field + is mutually exclusive with `scriptFrom` field. 
+ + ' + scriptFrom: + type: object + description: 'Reference to either a Kubernetes [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) + or a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + that contains the SQL script to execute. This + field is mutually exclusive with `script` field. + + + Fields `secretKeyRef` and `configMapKeyRef` are + mutually exclusive, and one of them is required. + + ' + properties: + secretKeyRef: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the SQL script to execute. This + field is mutually exclusive with `configMapKeyRef` + field. + + ' + properties: + name: + type: string + description: Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + configMapKeyRef: + type: object + description: 'A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + reference that contains the SQL script to + execute. This field is mutually exclusive + with `secretKeyRef` field. + + ' + properties: + name: + type: string + description: 'The name of the ConfigMap + that contains the SQL script to execute. + + ' + key: + type: string + description: 'The key name within the ConfigMap + that contains the SQL script to execute. + + ' + scripts: + type: array + description: The custom SQL scripts that will be executed + by pgbench during the benchmark instead of default + pgbench scripts + minItems: 1 + items: + type: object + description: A custom SQL script that will be executed + by pgbench during the benchmark instead of default + pgbench scripts + properties: + script: + type: string + description: 'Raw SQL script to execute. This + field is mutually exclusive with `scriptFrom` + field. + + ' + scriptFrom: + type: object + description: 'Reference to either a Kubernetes + [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) + or a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + that contains the SQL script to execute. This + field is mutually exclusive with `script` field. + + + Fields `secretKeyRef` and `configMapKeyRef` + are mutually exclusive, and one of them is required. + + ' + properties: + secretKeyRef: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the SQL script to execute. + This field is mutually exclusive with `configMapKeyRef` + field. + + ' + properties: + name: + type: string + description: Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to + select from. Must be a valid secret + key. + configMapKeyRef: + type: object + description: 'A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + reference that contains the SQL script to + execute. This field is mutually exclusive + with `secretKeyRef` field. + + ' + properties: + name: + type: string + description: 'The name of the ConfigMap + that contains the SQL script to execute. + + ' + key: + type: string + description: 'The key name within the + ConfigMap that contains the SQL script + to execute. 
+ + ' + builtin: + type: string + description: 'The name of the builtin script to + use. See https://www.postgresql.org/docs/current/pgbench.html#PGBENCH-OPTION-BUILTIN + + + When specified fields `replay`, `script` and + `scriptFrom` must not be set. + + ' + replay: + type: integer + description: 'The index of the query in the sampling + benchmark SGDbOps''s status (index start from + 0). + + + When specified fields `builtin`, `script` and + `scriptFrom` must not be set. + + ' + weight: + type: integer + description: The weight of this custom SQL script. + required: + - databaseSize + - duration + connectionType: + type: string + description: 'Specify the service where the benchmark will connect + to: + + + * `primary-service`: Connect to the primary service + + * `replicas-service`: Connect to the replicas service + + ' + required: + - type + vacuum: + type: object + description: 'Configuration of [vacuum](https://www.postgresql.org/docs/current/sql-vacuum.html) + operation + + ' + properties: + full: + type: boolean + description: "If true selects \"full\" vacuum, which can reclaim\ + \ more space, but takes much longer and exclusively locks\ + \ the table.\nThis method also requires extra disk space,\ + \ since it writes a new copy of the table and doesn't release\ + \ the old copy\n until the operation is complete. Usually\ + \ this should only be used when a significant amount of space\ + \ needs to be\n reclaimed from within the table. Defaults\ + \ to: `false`.\n" + freeze: + type: boolean + description: "If true selects aggressive \"freezing\" of tuples.\ + \ Specifying FREEZE is equivalent to performing VACUUM with\ + \ the\n vacuum_freeze_min_age and vacuum_freeze_table_age\ + \ parameters set to zero. Aggressive freezing is always performed\n\ + \ when the table is rewritten, so this option is redundant\ + \ when FULL is specified. Defaults to: `false`.\n" + analyze: + type: boolean + description: 'If true, updates statistics used by the planner + to determine the most efficient way to execute a query. Defaults + to: `true`. + + ' + disablePageSkipping: + type: boolean + description: "Normally, VACUUM will skip pages based on the\ + \ visibility map. Pages where all tuples are known to be frozen\ + \ can always be\n skipped, and those where all tuples are\ + \ known to be visible to all transactions may be skipped except\ + \ when performing an\n aggressive vacuum. Furthermore, except\ + \ when performing an aggressive vacuum, some pages may be\ + \ skipped in order to avoid\n waiting for other sessions\ + \ to finish using them. This option disables all page-skipping\ + \ behavior, and is intended to be\n used only when the contents\ + \ of the visibility map are suspect, which should happen only\ + \ if there is a hardware or\n software issue causing database\ + \ corruption. Defaults to: `false`.\n" + databases: + type: array + description: 'List of databases to vacuum or repack, don''t + specify to select all databases + + ' + items: + type: object + required: + - name + properties: + name: + type: string + description: the name of the database + full: + type: boolean + description: "If true selects \"full\" vacuum, which can\ + \ reclaim more space, but takes much longer and exclusively\ + \ locks the table.\nThis method also requires extra\ + \ disk space, since it writes a new copy of the table\ + \ and doesn't release the old copy\n until the operation\ + \ is complete. 
Usually this should only be used when\ + \ a significant amount of space needs to be\n reclaimed\ + \ from within the table. Defaults to: `false`.\n" + freeze: + type: boolean + description: "If true selects aggressive \"freezing\"\ + \ of tuples. Specifying FREEZE is equivalent to performing\ + \ VACUUM with the\n vacuum_freeze_min_age and vacuum_freeze_table_age\ + \ parameters set to zero. Aggressive freezing is always\ + \ performed\n when the table is rewritten, so this\ + \ option is redundant when FULL is specified. Defaults\ + \ to: `false`.\n" + analyze: + type: boolean + description: 'If true, updates statistics used by the + planner to determine the most efficient way to execute + a query. Defaults to: `true`. + + ' + disablePageSkipping: + type: boolean + description: "Normally, VACUUM will skip pages based on\ + \ the visibility map. Pages where all tuples are known\ + \ to be frozen can always be\n skipped, and those where\ + \ all tuples are known to be visible to all transactions\ + \ may be skipped except when performing an\n aggressive\ + \ vacuum. Furthermore, except when performing an aggressive\ + \ vacuum, some pages may be skipped in order to avoid\n\ + \ waiting for other sessions to finish using them.\ + \ This option disables all page-skipping behavior, and\ + \ is intended to be\n used only when the contents of\ + \ the visibility map are suspect, which should happen\ + \ only if there is a hardware or\n software issue causing\ + \ database corruption. Defaults to: `false`.\n" + repack: + type: object + description: 'Configuration of [`pg_repack`](https://github.com/reorg/pg_repack) + command + + ' + properties: + noOrder: + type: boolean + description: 'If true do vacuum full instead of cluster. Defaults + to: `false`. + + ' + waitTimeout: + type: string + description: 'If specified, an ISO 8601 duration format `PnDTnHnMn.nS` + to set a timeout to cancel other backends on conflict. + + ' + noKillBackend: + type: boolean + description: 'If true don''t kill other backends when timed + out. Defaults to: `false`. + + ' + noAnalyze: + type: boolean + description: 'If true don''t analyze at end. Defaults to: `false`. + + ' + excludeExtension: + type: boolean + description: 'If true don''t repack tables which belong to specific + extension. Defaults to: `false`. + + ' + databases: + type: array + description: 'List of database to vacuum or repack, don''t specify + to select all databases + + ' + items: + type: object + required: + - name + properties: + name: + type: string + description: the name of the database + noOrder: + type: boolean + description: 'If true do vacuum full instead of cluster. + Defaults to: `false`. + + ' + waitTimeout: + type: string + description: 'If specified, an ISO 8601 duration format + `PnDTnHnMn.nS` to set a timeout to cancel other backends + on conflict. + + ' + noKillBackend: + type: boolean + description: 'If true don''t kill other backends when + timed out. Defaults to: `false`. + + ' + noAnalyze: + type: boolean + description: 'If true don''t analyze at end. Defaults + to: `false`. + + ' + excludeExtension: + type: boolean + description: 'If true don''t repack tables which belong + to specific extension. Defaults to: `false`. 
+ + ' + majorVersionUpgrade: + type: object + description: 'Configuration of major version upgrade (see also [`pg_upgrade`](https://www.postgresql.org/docs/current/pgupgrade.html) + command) + + ' + properties: + postgresVersion: + type: string + description: 'The target postgres version that must have the + same major version of the target SGCluster. + + ' + postgresExtensions: + type: array + description: "A major version upgrade can not be performed if\ + \ a required extension is not present for the target major\ + \ version of the upgrade.\nIn those cases you will have to\ + \ provide the target extension version of the extension for\ + \ the target major version of postgres.\nBeware that in some\ + \ cases it is not possible to upgrade an extension alongside\ + \ postgres. This is the case for PostGIS or timescaledb.\n\ + \ In such cases you will have to upgrade the extension before\ + \ or after the major version upgrade. Please make sure you\ + \ read the\n documentation of each extension in order to understand\ + \ if it is possible to upgrade it during a major version upgrade\ + \ of postgres.\n" + items: + type: object + properties: + name: + type: string + description: The name of the extension to deploy. + publisher: + type: string + description: The id of the publisher of the extension + to deploy. If not specified `com.ongres` will be used + by default. + default: com.ongres + version: + type: string + description: The version of the extension to deploy. If + not specified version of `stable` channel will be used + by default and if only a version is available that one + will be used. + repository: + type: string + description: 'The repository base URL from where to obtain + the extension to deploy. + + + **This section is filled by the operator.** + + ' + required: + - name + sgPostgresConfig: + type: string + description: 'The postgres config that must have the same major + version of the target postgres version. + + ' + backupPath: + type: string + description: "The path were the backup is stored. If not set\ + \ this field is filled up by the operator.\n\nWhen provided\ + \ will indicate were the backups and WAL files will be stored.\n\ + \nThe path should be different from the current `.spec.configurations.backups[].path`\ + \ value for the target `SGCluster`\n in order to avoid mixing\ + \ WAL files of two distinct major versions of postgres.\n" + link: + type: boolean + description: 'If true use hard links instead of copying files + to the new cluster. This option is mutually exclusive with + `clone`. Defaults to: `false`. + + ' + clone: + type: boolean + description: "If true use efficient file cloning (also known\ + \ as \"reflinks\" on some systems) instead of copying files\ + \ to the new cluster.\nThis can result in near-instantaneous\ + \ copying of the data files, giving the speed advantages of\ + \ `link` while leaving the old\n cluster untouched. This\ + \ option is mutually exclusive with `link`. Defaults to: `false`.\n\ + \nFile cloning is only supported on some operating systems\ + \ and file systems. If it is selected but not supported, the\ + \ pg_upgrade\n run will error. At present, it is supported\ + \ on Linux (kernel 4.5 or later) with Btrfs and XFS (on file\ + \ systems created with\n reflink support), and on macOS with\ + \ APFS.\n" + check: + type: boolean + description: 'If true does some checks to see if the cluster + can perform a major version upgrade without changing any data. + Defaults to: `false`. 
+ + ' + toInstallPostgresExtensions: + type: array + description: 'The list of Postgres extensions to install. + + + **This section is filled by the operator.** + + ' + items: + type: object + properties: + name: + type: string + description: The name of the extension to install. + publisher: + type: string + description: The id of the publisher of the extension + to install. + version: + type: string + description: The version of the extension to install. + repository: + type: string + description: The repository base URL from where the extension + will be installed from. + postgresVersion: + type: string + description: The postgres major version of the extension + to install. + build: + type: string + description: The build version of the extension to install. + extraMounts: + type: array + description: The extra mounts of the extension to install. + items: + type: string + description: The extra mount of the installed extension. + required: + - name + - publisher + - version + - repository + - postgresVersion + restart: + type: object + description: 'Configuration of restart + + ' + properties: + method: + type: string + description: "The method used to perform the restart operation.\ + \ Available methods are:\n\n* `InPlace`: the in-place method\ + \ does not require more resources than those that are available.\n\ + \ In case only an instance of the StackGres cluster is present\ + \ this mean the service disruption will\n last longer so\ + \ we encourage use the reduced impact restart and especially\ + \ for a production environment.\n* `ReducedImpact`: this procedure\ + \ is the same as the in-place method but require additional\n\ + \ resources in order to spawn a new updated replica that\ + \ will be removed when the procedure completes.\n" + onlyPendingRestart: + type: boolean + description: "By default all Pods are restarted. Setting this\ + \ option to `true` allow to restart only those Pods which\n\ + \ are in pending restart state as detected by the operation.\ + \ Defaults to: `false`.\n" + minorVersionUpgrade: + type: object + description: 'Configuration of minor version upgrade + + ' + properties: + postgresVersion: + type: string + description: 'The target postgres version that must have the + same major version of the target SGCluster. + + ' + method: + type: string + description: "The method used to perform the minor version upgrade\ + \ operation. Available methods are:\n\n* `InPlace`: the in-place\ + \ method does not require more resources than those that are\ + \ available.\n In case only an instance of the StackGres\ + \ cluster is present this mean the service disruption will\n\ + \ last longer so we encourage use the reduced impact restart\ + \ and especially for a production environment.\n* `ReducedImpact`:\ + \ this procedure is the same as the in-place method but require\ + \ additional\n resources in order to spawn a new updated\ + \ replica that will be removed when the procedure completes.\n" + securityUpgrade: + type: object + description: 'Configuration of security upgrade + + ' + properties: + method: + type: string + description: "The method used to perform the security upgrade\ + \ operation. 
Available methods are:\n\n* `InPlace`: the in-place\ + \ method does not require more resources than those that are\ + \ available.\n In case only an instance of the StackGres\ + \ cluster is present this mean the service disruption will\n\ + \ last longer so we encourage use the reduced impact restart\ + \ and especially for a production environment.\n* `ReducedImpact`:\ + \ this procedure is the same as the in-place method but require\ + \ additional\n resources in order to spawn a new updated\ + \ replica that will be removed when the procedure completes.\n" + required: + - sgCluster + - op + status: + type: object + properties: + conditions: + type: array + description: 'Possible conditions are: + + + * Running: to indicate when the operation is actually running + + * Completed: to indicate when the operation has completed successfully + + * Failed: to indicate when the operation has failed + + ' + items: + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one + status to another. + type: string + message: + description: A human-readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition last transition. + type: string + status: + description: Status of the condition, one of `True`, `False` + or `Unknown`. + type: string + type: + description: Type of deployment condition. + type: string + opRetries: + type: integer + description: 'The number of retries performed by the operation + + ' + opStarted: + type: string + description: 'The ISO 8601 timestamp of when the operation started + running + + ' + benchmark: + type: object + description: 'The results of the benchmark + + ' + properties: + sampling: + type: object + description: The results of the sampling benchmark + properties: + topQueries: + type: array + description: The top queries sampled with the stats from + pg_stat_statements. If is omitted if `omitTopQueriesInStatus` + is set to `true`. + items: + type: object + properties: + id: + type: string + description: The query id of the representative statement + calculated by Postgres + stats: + type: object + description: stats collected by the top queries query + additionalProperties: + type: string + queries: + type: array + description: The queries sampled. + items: + type: object + properties: + id: + type: string + description: The query id of the representative statement + calculated by Postgres + query: + type: string + description: A sampled SQL query + timestamp: + type: string + description: The sampled query timestamp + pgbench: + type: object + description: The results of the pgbench benchmark + properties: + scaleFactor: + type: number + nullable: true + description: 'The scale factor used to run pgbench (`--scale`). + + ' + transactionsProcessed: + type: integer + nullable: true + description: 'The number of transactions processed. + + ' + latency: + type: object + description: 'The latency results of the pgbench benchmark + + ' + properties: + average: + type: object + description: 'Average latency of transactions + + ' + properties: + value: + type: number + nullable: true + description: 'The latency average value + + ' + unit: + type: string + nullable: false + description: 'The latency measure unit + + ' + standardDeviation: + type: object + description: 'The latency standard deviation of transactions. 
+ + ' + properties: + value: + type: number + nullable: true + description: 'The latency standard deviation value + + ' + unit: + type: string + nullable: false + description: 'The latency measure unit + + ' + transactionsPerSecond: + type: object + description: 'All the transactions per second results of + the pgbench benchmark + + ' + properties: + includingConnectionsEstablishing: + type: object + description: 'Number of Transactions Per Second (tps) + including connection establishing. + + ' + properties: + value: + type: number + description: 'The Transactions Per Second (tps) + including connections establishing value + + ' + unit: + type: string + description: 'Transactions Per Second (tps) measure + unit + + ' + excludingConnectionsEstablishing: + type: object + description: 'Number of Transactions Per Second (tps) + excluding connection establishing. + + ' + properties: + value: + type: number + nullable: true + description: 'The Transactions Per Second (tps) + excluding connections establishing value + + ' + unit: + type: string + nullable: false + description: 'Transactions Per Second (tps) measure + unit + + ' + overTime: + type: object + description: The Transactions Per Second (tps) values + aggregated over unit of time + properties: + valuesUnit: + type: string + description: The Transactions Per Second (tps) measures + unit + values: + type: array + description: The Transactions Per Second (tps) values + aggregated over unit of time + items: + type: number + intervalDurationUnit: + type: string + description: The interval duration measure unit + intervalDuration: + type: number + description: The interval duration used to aggregate + the transactions per second. + statements: + type: array + description: Average per-statement latency (execution time + from the perspective of the client) of each command after + the benchmark finishes + items: + type: object + description: Average per-statement latency (execution + time from the perspective of the client) of a command + after the benchmark finishes + properties: + script: + type: integer + description: The script index (`0` if no custom scripts + have been defined) + command: + type: string + description: The command + latency: + type: number + description: Average latency of the command + unit: + type: string + description: The average latency measure unit + hdrHistogram: + type: string + description: Compressed and base 64 encoded HdrHistogram + majorVersionUpgrade: + type: object + description: 'The results of a major version upgrade + + ' + properties: + sourcePostgresVersion: + type: string + description: 'The postgres version currently used by the primary + instance + + ' + targetPostgresVersion: + type: string + description: 'The postgres version that the cluster will be + upgraded to + + ' + primaryInstance: + type: string + description: 'The primary instance when the operation started + + ' + initialInstances: + type: array + description: 'The instances present when the operation started + + ' + items: + type: string + pendingToRestartInstances: + type: array + description: 'The instances that are pending to be restarted + + ' + items: + type: string + restartedInstances: + type: array + description: 'The instances that have been restarted + + ' + items: + type: string + phase: + type: string + description: 'The phase the operation is or was executing) + + ' + failure: + type: string + description: 'A failure message (when available) + + ' + restart: + type: object + description: 'The results of a restart + + ' + 
properties: + primaryInstance: + type: string + description: 'The primary instance when the operation started + + ' + initialInstances: + type: array + description: 'The instances present when the operation started + + ' + items: + type: string + pendingToRestartInstances: + type: array + description: 'The instances that are pending to be restarted + + ' + items: + type: string + restartedInstances: + type: array + description: 'The instances that have been restarted + + ' + items: + type: string + switchoverInitiated: + type: string + description: 'An ISO 8601 date indicating if and when the switchover + initiated + + ' + switchoverFinalized: + type: string + description: 'An ISO 8601 date indicating if and when the switchover + finalized + + ' + failure: + type: string + description: 'A failure message (when available) + + ' + minorVersionUpgrade: + type: object + description: 'The results of a minor version upgrade + + ' + properties: + sourcePostgresVersion: + type: string + description: 'The postgres version currently used by the primary + instance + + ' + targetPostgresVersion: + type: string + description: 'The postgres version that the cluster will be + upgraded (or downgraded) to + + ' + primaryInstance: + type: string + description: 'The primary instance when the operation started + + ' + initialInstances: + type: array + description: 'The instances present when the operation started + + ' + items: + type: string + pendingToRestartInstances: + type: array + description: 'The instances that are pending to be restarted + + ' + items: + type: string + restartedInstances: + type: array + description: 'The instances that have been restarted + + ' + items: + type: string + switchoverInitiated: + type: string + description: 'An ISO 8601 date indicating if and when the switchover + initiated + + ' + switchoverFinalized: + type: string + description: 'An ISO 8601 date indicating if and when the switchover + finalized + + ' + failure: + type: string + description: 'A failure message (when available) + + ' + securityUpgrade: + type: object + description: 'The results of a security upgrade + + ' + properties: + primaryInstance: + type: string + description: 'The primary instance when the operation started + + ' + initialInstances: + type: array + description: 'The instances present when the operation started + + ' + items: + type: string + pendingToRestartInstances: + type: array + description: 'The instances that are pending to be restarted + + ' + items: + type: string + restartedInstances: + type: array + description: 'The instances that have been restarted + + ' + items: + type: string + switchoverInitiated: + type: string + description: 'An ISO 8601 date indicating if and when the switchover + initiated + + ' + switchoverFinalized: + type: string + description: 'An ISO 8601 date indicating if and when the switchover + finalized + + ' + failure: + type: string + description: 'A failure message (when available) + + ' diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgdistributedlogs.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgdistributedlogs.yaml new file mode 100644 index 00000000000..960b3ea156f --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgdistributedlogs.yaml @@ -0,0 +1,1932 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgdistributedlogs.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGDistributedLogs + listKind: SGDistributedLogsList + 
plural: sgdistributedlogs + singular: sgdistributedlogs + shortNames: + - sgdil + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: volume-size + type: string + jsonPath: .spec.persistentVolume.size + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 44 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + description: 'Name of the Distributed Logs cluster. Following [Kubernetes + naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 subdomain, that is, up to 253 characters + consisting of one or more lowercase labels separated by `.`, where + each label is an alphanumeric (a-z, and 0-9) string, with the + `-` character allowed anywhere except the first or last character. + + + A Distributed Logs cluster may store logs for zero or more SGClusters. + + + The name must be unique across all SGCluster, SGShardedCluster + and SGDistributedLogs in the same namespace. + + ' + spec: + type: object + properties: + profile: + type: string + description: "The profile allows changing in a convenient place\ \ a set of configuration defaults that affect how the cluster\ \ is generated.\n\nAll those defaults can be overwritten by setting\ \ the corresponding fields.\n\nAvailable profiles are:\n\n* `production`:\n\ \n Prevents two Pods from running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ \ to `false` by default).\n Sets both limits and requests using\ \ `SGInstanceProfile` for `patroni` container that runs both Patroni\ \ and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ \ to `false` by default).\n Sets requests using the referenced\ \ `SGInstanceProfile` for sidecar containers other than `patroni`\ \ (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ \ to `false` by default).\n\n* `testing`:\n\n Allows two Pods\ \ to run in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ \ to `true` by default).\n Sets both limits and requests using\ \ `SGInstanceProfile` for `patroni` container that runs both Patroni\ \ and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ \ to `false` by default).\n Sets requests using the referenced\ \ `SGInstanceProfile` for sidecar containers other than `patroni`\ \ (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ \ to `false` by default).\n\n* `development`:\n\n Allows two\ \ Pods to run in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ \ to `true` by default).\n Unsets both limits and requests for\ \ `patroni` container that runs both Patroni and Postgres (set\ \ `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ \ to `true` by default).\n Unsets requests for sidecar containers\ \ other than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ \ to `true` by default).\n\n**Changing this field may require\ \ a restart.**\n" + default: production + persistentVolume: + type: object + description: Pod's persistent volume configuration + properties: + size: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$ + description: 'Size of the PersistentVolume set for the pod of + the cluster for distributed logs. 
This size is specified either + in Mebibytes, Gibibytes or Tebibytes (multiples of 2^20, 2^30 + or 2^40, respectively). + + ' + storageClass: + type: string + description: 'Name of an existing StorageClass in the Kubernetes + cluster, used to create the PersistentVolumes for the instances + of the cluster. + + ' + postgresServices: + type: object + nullable: true + description: "Kubernetes [services](https://kubernetes.io/docs/concepts/services-networking/service/)\ + \ created or managed by StackGres.\n\n**Example:**\n\n```yaml\n\ + apiVersion: stackgres.io/v1\nkind: SGDistributedLogs\nmetadata:\n\ + \ name: stackgres\nspec:\n postgresServices:\n primary:\n\ + \ type: ClusterIP\n replicas:\n enabled: true\n \ + \ type: ClusterIP\n```\n" + properties: + primary: + type: object + description: Configuration for the `-primary` service. It provides + a stable connection (regardless of primary failures or switchovers) + to the read-write Postgres server of the cluster. + properties: + type: + type: string + enum: + - ClusterIP + - LoadBalancer + - NodePort + description: Specifies the type of Kubernetes service(`ClusterIP`, + `LoadBalancer`, `NodePort`) + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if NodePorts + will be automatically allocated for services with type + LoadBalancer. Default is "true". It may be set to "false" + if the cluster load-balancer does not rely on NodePorts. If + the caller requests specific NodePorts (by specifying + a value), those requests will be respected, regardless + of this field. This field may only be set for services + with type LoadBalancer and will be cleared if the type + is changed to any other type. + type: boolean + externalIPs: + description: 'externalIPs is a list of IP addresses for + which nodes in the cluster will also accept traffic for + this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes system. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#allocateloadbalancernodeports-v1-core' + items: + type: string + type: array + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes distribute + service traffic they receive on one of the Service's "externally-facing" + addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). + If set to "Local", the proxy will configure the service + in a way that assumes that external load balancers will + take care of balancing the service traffic between nodes, + and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client + source IP. (Traffic mistakenly sent to a node with no + endpoints will be dropped.) The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer + IP from within the cluster will always get "Cluster" semantics, + but clients sending to a NodePort from within the cluster + may need to take traffic policy into account when picking + a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is set + to Local. 
If a value is specified, is in-range, and is + not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. + load-balancers) can use this port to determine if a given + node holds endpoints for this service or not. If this + field is specified when creating a Service which does + not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing + type). This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes distribute + service traffic they receive on the ClusterIP. If set + to "Local", the proxy will assume that pods only want + to talk to endpoints of the service on the same node as + the pod, dropping the traffic if there are no local endpoints. + The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified + by topology and other features). + type: string + ipFamilies: + description: 'IPFamilies is a list of IP families (e.g. + IPv4, IPv6) assigned to this service. This field is usually + assigned automatically based on cluster configuration + and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise + creation of the service will fail. This field is conditionally + mutable: it allows for adding or removing a secondary + IP family, but it does not allow changing the primary + IP family of the Service. Valid values are "IPv4" and + "IPv6". This field only applies to Services of types + ClusterIP, NodePort, and LoadBalancer, and does apply + to "headless" services. This field will be wiped when + updating a Service to type ExternalName. + + + This field may hold a maximum of two entries (dual-stack + families, in either order). These families must correspond + to the values of the clusterIPs field, if specified. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy + field.' + items: + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is no + value provided, then this field will be set to SingleStack. + Services can be "SingleStack" (a single IP family), "PreferDualStack" + (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend on + the value of this field. This field will be wiped when + updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If specified, + the value of this field must be a label-style identifier, + with an optional prefix, e.g. "internal-vip" or "example.com/internal-vip". + Unprefixed names are reserved for end-users. This field + can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is + used, today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any default + load balancer implementation (e.g. 
cloud providers) should + ignore Services that set this field. This field can only + be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped + when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load balancer + is created. This field will be ignored if the cloud-provider + does not support the feature. Deprecated: This field was + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client + IPs. This field will be ignored if the cloud-provider + does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that any + agent which deals with endpoints for this Service should + disregard any indications of ready/not-ready. The primary + use case for setting this field is for a StatefulSet's + Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes + controllers that generate Endpoints and EndpointSlice + resources for Services interpret this to mean that all + endpoints are considered "ready" even if the Pods themselves + are not. Agents which consume only Kubernetes generated + endpoints through the Endpoints or EndpointSlice resources + can safely assume this behavior. + type: boolean + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to maintain + session affinity. Enable client IP based session affinity. + Must be ClientIP or None. Defaults to None. More info: + https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: 'SessionAffinityConfig represents the configurations + of session affinity. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#sessionaffinityconfig-v1-core' + properties: + clientIP: + description: ClientIPConfig represents the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The value + must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + nodePorts: + type: object + description: nodePorts is a list of ports for exposing a + cluster services to the outside world + properties: + pgport: + type: integer + description: the node port that will be exposed to connect + to Postgres instance + replicationport: + type: integer + description: the node port that will be exposed to connect + to Postgres instance for replication purpose + replicas: + type: object + description: Configuration for the `-replicas` service. 
It provides + a stable connection (regardless of replica node failures) + to any read-only Postgres server of the cluster. Read-only + servers are load-balanced via this service. + properties: + type: + type: string + enum: + - ClusterIP + - LoadBalancer + - NodePort + description: Specifies the type of Kubernetes service(`ClusterIP`, + `LoadBalancer`, `NodePort`) + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if NodePorts + will be automatically allocated for services with type + LoadBalancer. Default is "true". It may be set to "false" + if the cluster load-balancer does not rely on NodePorts. If + the caller requests specific NodePorts (by specifying + a value), those requests will be respected, regardless + of this field. This field may only be set for services + with type LoadBalancer and will be cleared if the type + is changed to any other type. + type: boolean + externalIPs: + description: 'externalIPs is a list of IP addresses for + which nodes in the cluster will also accept traffic for + this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes system. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#allocateloadbalancernodeports-v1-core' + items: + type: string + type: array + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes distribute + service traffic they receive on one of the Service's "externally-facing" + addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). + If set to "Local", the proxy will configure the service + in a way that assumes that external load balancers will + take care of balancing the service traffic between nodes, + and so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the client + source IP. (Traffic mistakenly sent to a node with no + endpoints will be dropped.) The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + Note that traffic sent to an External IP or LoadBalancer + IP from within the cluster will always get "Cluster" semantics, + but clients sending to a NodePort from within the cluster + may need to take traffic policy into account when picking + a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is set + to Local. If a value is specified, is in-range, and is + not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. + load-balancers) can use this port to determine if a given + node holds endpoints for this service or not. If this + field is specified when creating a Service which does + not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing + type). This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes distribute + service traffic they receive on the ClusterIP. If set + to "Local", the proxy will assume that pods only want + to talk to endpoints of the service on the same node as + the pod, dropping the traffic if there are no local endpoints. 
+ The default value, "Cluster", uses the standard behavior + of routing to all endpoints evenly (possibly modified + by topology and other features). + type: string + ipFamilies: + description: 'IPFamilies is a list of IP families (e.g. + IPv4, IPv6) assigned to this service. This field is usually + assigned automatically based on cluster configuration + and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise + creation of the service will fail. This field is conditionally + mutable: it allows for adding or removing a secondary + IP family, but it does not allow changing the primary + IP family of the Service. Valid values are "IPv4" and + "IPv6". This field only applies to Services of types + ClusterIP, NodePort, and LoadBalancer, and does apply + to "headless" services. This field will be wiped when + updating a Service to type ExternalName. + + + This field may hold a maximum of two entries (dual-stack + families, in either order). These families must correspond + to the values of the clusterIPs field, if specified. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy + field.' + items: + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is no + value provided, then this field will be set to SingleStack. + Services can be "SingleStack" (a single IP family), "PreferDualStack" + (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend on + the value of this field. This field will be wiped when + updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If specified, + the value of this field must be a label-style identifier, + with an optional prefix, e.g. "internal-vip" or "example.com/internal-vip". + Unprefixed names are reserved for end-users. This field + can only be set when the Service type is 'LoadBalancer'. + If not set, the default load balancer implementation is + used, today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any default + load balancer implementation (e.g. cloud providers) should + ignore Services that set this field. This field can only + be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped + when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load balancer + is created. This field will be ignored if the cloud-provider + does not support the feature. Deprecated: This field was + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' 
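+ # Illustrative sketch only (values are assumptions): exposing the `-replicas`
+ # service through a NodePort, using the fields documented in this section:
+ #   spec:
+ #     postgresServices:
+ #       replicas:
+ #         enabled: true
+ #         type: NodePort
+ #         nodePorts:
+ #           pgport: 30432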
+ type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client + IPs. This field will be ignored if the cloud-provider + does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that any + agent which deals with endpoints for this Service should + disregard any indications of ready/not-ready. The primary + use case for setting this field is for a StatefulSet's + Headless Service to propagate SRV DNS records for its + Pods for the purpose of peer discovery. The Kubernetes + controllers that generate Endpoints and EndpointSlice + resources for Services interpret this to mean that all + endpoints are considered "ready" even if the Pods themselves + are not. Agents which consume only Kubernetes generated + endpoints through the Endpoints or EndpointSlice resources + can safely assume this behavior. + type: boolean + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to maintain + session affinity. Enable client IP based session affinity. + Must be ClientIP or None. Defaults to None. More info: + https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: 'SessionAffinityConfig represents the configurations + of session affinity. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#sessionaffinityconfig-v1-core' + properties: + clientIP: + description: ClientIPConfig represents the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The value + must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + nodePorts: + type: object + description: nodePorts is a list of ports for exposing a + cluster services to the outside world + properties: + pgport: + type: integer + description: the node port that will be exposed to connect + to Postgres instance + replicationport: + type: integer + description: the node port that will be exposed to connect + to Postgres instance for replication purpose + enabled: + type: boolean + description: Specify if the `-replicas` service should be + created or not. + resources: + type: object + description: Pod custom resources configuration. + properties: + enableClusterLimitsRequirements: + type: boolean + description: 'When set to `true` resources limits for containers + other than the patroni container will be set just like for + the patroni container as specified in the SGInstanceProfile. 
+ + + **Changing this field may require a restart.** + + ' + disableResourcesRequestsSplitFromTotal: + type: boolean + description: "When set to `true` the resources requests values\ + \ in fields `SGInstanceProfile.spec.requests.cpu` and `SGInstanceProfile.spec.requests.memory`\ + \ will represent the resources\n requests of the patroni container\ + \ and the total resources requests calculated by adding the\ + \ resources requests of all the containers (including the\ + \ patroni container).\n\n**Changing this field may require\ + \ a restart.**\n" + scheduling: + type: object + description: 'Pod custom scheduling and affinity configuration. + + + **Changing this field may require a restart.** + + ' + properties: + nodeSelector: + type: object + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the pod to fit on a node. Selector which must match a + node''s labels for the pod to be scheduled on that node. More + info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + ' + tolerations: + description: 'If specified, the pod''s tolerations. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core' + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + nodeAffinity: + description: 'Node affinity is a group of node affinity scheduling + rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. 
for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the node + matches the corresponding matchExpressions; the node(s) + with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). + properties: + preference: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. + The TopologySelectorTerm type implements a subset + of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the union of the + results of one or more label queries over a set of nodes; + that is, it represents the OR of the selectors represented + by the node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. + The TopologySelectorTerm type implements a subset + of the NodeSelectorTerm. 
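+ # Illustrative sketch only (label key and values are assumptions): a node
+ # selector term under requiredDuringSchedulingIgnoredDuringExecution could be
+ # written as:
+ #   nodeSelectorTerms:
+ #     - matchExpressions:
+ #         - key: disktype
+ #           operator: In
+ #           values: ["ssd"]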
+ properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which + indicate the highest priorities with the former being the + highest priority. Any other name must be defined by creating + a PriorityClass object with that name. If not specified, the + pod priority will be default or zero if there is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the node + has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. 
This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not + be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to a pod label update), + the system may or may not try to eventually evict the + pod from its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm are + intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key in (value)` to select the + group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MatchLabelKeys and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key notin (value)` to select + the group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. 
+ items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of + namespace names that the term applies to. The term + is applied to the union of the namespaces listed + in this field and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter pod anti + affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the + sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. 
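+ # Illustrative sketch only (weight, labels and topology key are assumptions):
+ # a preferred pod anti-affinity term could look like:
+ #   preferredDuringSchedulingIgnoredDuringExecution:
+ #     - weight: 100
+ #       podAffinityTerm:
+ #         labelSelector:
+ #           matchLabels:
+ #             app: my-app
+ #         topologyKey: kubernetes.io/hostname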
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. 
This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the pod + will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met at + some point during pod execution (e.g. due to a pod label + update), the system may or may not try to eventually evict + the pod from its node. When there are multiple elements, + the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key in (value)` to select the + group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MatchLabelKeys and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key notin (value)` to select + the group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. 
+ items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of + namespace names that the term applies to. The term + is applied to the union of the namespaces listed + in this field and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + sgInstanceProfile: + type: string + description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/04-postgres-cluster-management/03-resource-profiles/). + A SGInstanceProfile defines CPU and memory limits. Must exist + before creating a distributed logs. When no profile is set, a + default (currently: 1 core, 2 GiB RAM) one is used. + + + **Changing this field may require a restart.** + + ' + configurations: + type: object + description: 'Cluster custom configurations. + + ' + properties: + sgPostgresConfig: + type: string + description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the distributed logs. It must exist. When not set, + a default Postgres config, for the major version selected, + is used. + + + **Changing this field may require a restart.** + + ' + metadata: + type: object + description: Metadata information for cluster created resources. 
+ properties: + annotations: + type: object + description: "Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)\ + \ to be passed to resources created and managed by StackGres.\n\ + \n**Example:**\n\n```yaml\napiVersion: stackgres.io/v1\nkind:\ + \ SGDistributedLogs\nmetadata:\n name: stackgres\nspec:\n\ + \ metadata:\n annotations:\n clusterPods:\n \ + \ key: value\n primaryService:\n key: value\n\ + \ replicasService:\n key: value\n```\n" + properties: + allResources: + type: object + description: Annotations to attach to any resource created + or managed by StackGres. + additionalProperties: + type: string + clusterPods: + type: object + description: Annotations to attach to pods created or managed + by StackGres. + additionalProperties: + type: string + services: + type: object + description: Annotations to attach to all services created + or managed by StackGres. + additionalProperties: + type: string + primaryService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-primary` service. + additionalProperties: + type: string + replicasService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-replicas` service. + additionalProperties: + type: string + labels: + type: object + description: "Custom Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)\ + \ to be passed to resources created and managed by StackGres.\n\ + \n**Example:**\n\n```yaml\napiVersion: stackgres.io/v1\nkind:\ + \ SGDistributedLogs\nmetadata:\n name: stackgres\nspec:\n\ + \ metadata:\n labels:\n clusterPods:\n customLabel:\ + \ customLabelValue\n services:\n customLabel:\ + \ customLabelValue\n```\n" + properties: + clusterPods: + type: object + description: Labels to attach to Pods created or managed + by StackGres. + additionalProperties: + type: string + services: + type: object + description: Labels to attach to Services and Endpoints + created or managed by StackGres. + additionalProperties: + type: string + nonProductionOptions: + type: object + properties: + disableClusterPodAntiAffinity: + type: boolean + description: 'It is a best practice, on non-containerized environments, + when running production workloads, to run each database server + on a different server (virtual or physical), i.e., not to + co-locate more than one database server per host. + + + The same best practice applies to databases on containers. + By default, StackGres will not allow to run more than one + StackGres or Distributed Logs pod on a given Kubernetes node. + If set to `true` it will allow more than one StackGres pod + per node. + + + **Changing this field may require a restart.** + + ' + disablePatroniResourceRequirements: + type: boolean + description: 'It is a best practice, on containerized environments, + when running production workloads, to enforce container''s + resources requirements. + + + The same best practice applies to databases on containers. + By default, StackGres will configure resource requirements + for patroni container. Set this property to true to prevent + StackGres from setting patroni container''s resources requirement. 
+ + + **Changing this field may require a restart.** + + ' + disableClusterResourceRequirements: + type: boolean + description: 'It is a best practice, on containerized environments, + when running production workloads, to enforce container''s + resources requirements. + + + By default, StackGres will configure resource requirements + for all the containers. Set this property to true to prevent + StackGres from setting container''s resources requirements + (except for patroni container, see `disablePatroniResourceRequirements`). + + + **Changing this field may require a restart.** + + ' + enableSetPatroniCpuRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ cpu requirements request to be equals to the limit allow\ + \ to achieve the highest level of performance. Doing so, reduces\ + \ the chances of leaving\n the workload with less cpu than\ + \ it requires. It also allow to set [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs\ + \ on the node.\n\nBy default, StackGres will configure cpu\ + \ requirements to have the same limit and request for the\ + \ patroni container. Set this property to true to prevent\ + \ StackGres from setting patroni container's cpu requirements\ + \ request equals to the limit\n when `.spec.requests.cpu`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n\ + **Changing this field may require a restart.**\n" + enableSetClusterCpuRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ cpu requirements request to be equals to the limit allow\ + \ to achieve the highest level of performance. Doing so, reduces\ + \ the chances of leaving\n the workload with less cpu than\ + \ it requires. It also allow to set [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs\ + \ on the node.\n\nBy default, StackGres will configure cpu\ + \ requirements to have the same limit and request for all\ + \ the containers. Set this property to true to prevent StackGres\ + \ from setting container's cpu requirements request equals\ + \ to the limit (except for patroni container, see `enablePatroniCpuRequests`)\n\ + \ when `.spec.requests.containers..cpu` `.spec.requests.initContainers..cpu` is configured in the referenced `SGInstanceProfile`.\n\ + \n**Changing this field may require a restart.**\n" + enableSetPatroniMemoryRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ memory requirements request to be equals to the limit allow\ + \ to achieve the highest level of performance. Doing so, reduces\ + \ the chances of leaving\n the workload with less memory\ + \ than it requires.\n\nBy default, StackGres will configure\ + \ memory requirements to have the same limit and request for\ + \ the patroni container. 
Set this property to true to prevent\ + \ StackGres from setting patroni container's memory requirements\ + \ request equals to the limit\n when `.spec.requests.memory`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n\ + **Changing this field may require a restart.**\n" + enableSetClusterMemoryRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ memory requirements request to be equals to the limit allow\ + \ to achieve the highest level of performance. Doing so, reduces\ + \ the chances of leaving\n the workload with less memory\ + \ than it requires.\n\nBy default, StackGres will configure\ + \ memory requirements to have the same limit and request for\ + \ all the containers. Set this property to true to prevent\ + \ StackGres from setting container's memory requirements request\ + \ equals to the limit (except for patroni container, see `enablePatroniCpuRequests`)\n\ + \ when `.spec.requests.containers..memory`\ + \ `.spec.requests.initContainers..memory`\ + \ is configured in the referenced `SGInstanceProfile`.\n\n\ + **Changing this field may require a restart.**\n" + required: + - persistentVolume + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one + status to another. + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, + Unknown. + type: string + type: + description: Type of deployment condition. + type: string + postgresVersion: + type: string + description: The used Postgres version + timescaledbVersion: + type: string + description: The used Timescaledb version + databases: + type: array + description: The list of database status + items: + type: object + description: A database status + properties: + name: + type: string + description: The database name that has been created + retention: + type: string + description: The retention window that has been applied to + tables + connectedClusters: + type: array + description: The list of connected `sgclusters` + items: + type: object + description: A connected `sgcluster` + properties: + namespace: + type: string + description: The `sgcluster` namespace + name: + type: string + description: The `sgcluster` name + config: + type: object + description: The configuration for `sgdistributedlgos` of + this `sgcluster` + properties: + sgDistributedLogs: + type: string + description: The `sgdistributedlogs` to which this `sgcluster` + is connected to + retention: + type: string + description: The retention window that has been applied + to tables + fluentdConfigHash: + type: string + description: The hash of the configuration file that is used by + fluentd + labelPrefix: + type: string + description: The custom prefix that is prepended to all labels. + oldConfigMapRemoved: + type: boolean + description: Flag to indicate the previous existing ConfigMap has + been removed. 
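
For reference, a minimal manifest exercising the SGDistributedLogs schema above might look like the sketch below. The resource names, namespace, and sizes are hypothetical; `persistentVolume` is the only field required under `spec` per the schema, and `sgInstanceProfile` / `configurations.sgPostgresConfig` are optional references to existing resources (defaults are used when omitted, as described above).

```yaml
# Illustrative sketch only: names, namespace, and sizes are hypothetical,
# and the persistentVolume layout is assumed from the SGDistributedLogs
# schema defined earlier in this file.
apiVersion: stackgres.io/v1
kind: SGDistributedLogs
metadata:
  name: distributedlogs          # hypothetical name
  namespace: my-namespace        # hypothetical namespace
spec:
  persistentVolume:
    size: 20Gi                   # storage for the logs database (assumed field layout)
  sgInstanceProfile: size-small  # optional: existing SGInstanceProfile; a default profile is used if omitted
  configurations:
    sgPostgresConfig: pgconfig1  # optional: existing SGPostgresConfig for the selected major version
  metadata:
    labels:
      clusterPods:
        app-tier: logging        # custom labels attached to the Pods, per the metadata section above
```

For non-production environments, `spec.nonProductionOptions.disableClusterPodAntiAffinity: true` could additionally be set to allow more than one StackGres pod per node, as documented in the `nonProductionOptions` section above.
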
diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sginstanceprofiles.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sginstanceprofiles.yaml new file mode 100644 index 00000000000..19a91e34139 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sginstanceprofiles.yaml @@ -0,0 +1,309 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sginstanceprofiles.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGInstanceProfile + listKind: SGInstanceProfileList + plural: sginstanceprofiles + singular: sginstanceprofile + shortNames: + - sginp + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: cpu + type: string + jsonPath: .spec.cpu + - name: memory + type: string + jsonPath: .spec.memory + schema: + openAPIV3Schema: + required: + - metadata + - spec + type: object + properties: + metadata: + type: object + properties: + name: + type: string + description: "Name of the Instance Profile. An instance profile\ + \ represents a \"kind\" of\n server (CPU and RAM) where you may\ + \ run StackGres Pods, classified by a given name.\n The profile\ + \ may be referenced by zero or more SGClusters, and if so it would\n\ + \ be referenced by its name. Following [Kubernetes naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md),\ + \ it must be an rfc1035/rfc1123 subdomain, that is, up to 253\ + \ characters consisting of one or more lowercase labels separated\ + \ by `.`. Where each label is an alphanumeric (a-z, and 0-9) string,\ + \ with the `-` character allowed anywhere except the first or\ + \ last character.\n\nThe name must be unique across all instance\ + \ profiles in the same namespace.\n" + spec: + type: object + properties: + cpu: + type: string + pattern: ^[1-9][0-9]*[m]?$ + description: "CPU(s) (cores) limits for every resource's Pod that\ + \ reference this SGInstanceProfile. The suffix `m`\n specifies\ + \ millicpus (where 1000m is equals to 1).\n\nThe number of cpu\ + \ limits is assigned to the patroni container (that runs both\ + \ Patroni and PostgreSQL).\n\nA minimum of 2 cpu is recommended.\n" + memory: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM limits for every resource's Pod that reference\ + \ this SGInstanceProfile. The suffix `Mi` or `Gi`\n specifies\ + \ Mebibytes or Gibibytes, respectively.\n\nThe amount of RAM limits\ + \ is assigned to the patroni container (that runs both Patroni\ + \ and PostgreSQL).\n\nA minimum of 2Gi is recommended.\n" + hugePages: + type: object + description: 'RAM limits allocated for huge pages of the patroni + container (that runs both Patroni and PostgreSQL). + + ' + properties: + hugepages-2Mi: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM limits allocated for huge pages of the patroni\ + \ container (that runs both Patroni and PostgreSQL) with a\ + \ size of 2Mi. The suffix `Mi` or `Gi`\n specifies Mebibytes\ + \ or Gibibytes, respectively.\n" + hugepages-1Gi: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM limits allocated for huge pages of the patroni\ + \ container (that runs both Patroni and PostgreSQL) with a\ + \ size of 1Gi. 
The suffix `Mi` or `Gi`\n specifies Mebibytes\ + \ or Gibibytes, respectively.\n" + containers: + type: object + description: 'The CPU(s) (cores) and RAM limits assigned to containers + other than patroni container. + + ' + additionalProperties: + type: object + description: "The CPU(s) (cores) and RAM limits assigned to a\ + \ container.\n\nThis section, if left empty, will be filled\ + \ automatically by the operator with\n some defaults that can\ + \ be proportional to the resources limits assigned to patroni\n\ + \ container (except for the huge pages that are always left\ + \ untouched).\n" + properties: + cpu: + type: string + pattern: ^[1-9][0-9]*[m]?$ + description: "CPU(s) (cores) limits for the specified container.\ + \ The suffix `m`\n specifies millicpus (where 1000m is\ + \ equals to 1).\n" + memory: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM limits for the specified container. The\ + \ suffix `Mi` or `Gi`\n specifies Mebibytes or Gibibytes,\ + \ respectively.\n" + hugePages: + type: object + description: 'RAM limits for huge pages for the specified + container. + + ' + properties: + hugepages-2Mi: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM limits for huge pages of the specified\ + \ container with a size of 2Mi. The suffix `Mi`\n or\ + \ `Gi` specifies Mebibytes or Gibibytes, respectively.\n" + hugepages-1Gi: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM limits for huge pages of the specified\ + \ container with a size of 1Gi. The suffix `Mi`\n or\ + \ `Gi` specifies Mebibytes or Gibibytes, respectively.\n" + initContainers: + type: object + description: The CPU(s) (cores) and RAM limits assigned to the init + containers. + additionalProperties: + type: object + description: "The CPU(s) (cores) and RAM limits assigned to a\ + \ init container.\n\nThis section will be filled automatically\ + \ by the operator with\n the same values of the resources limits\ + \ assigned to patroni\n container (except for the huge pages\ + \ that are always left untouched).\n" + properties: + cpu: + type: string + pattern: ^[1-9][0-9]*[m]?$ + description: "CPU(s) (cores) limits for the specified init\ + \ container. The suffix\n `m` specifies millicpus (where\ + \ 1000m is equals to 1).\n" + memory: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM limits for the specified init container.\ + \ The suffix `Mi`\n or `Gi` specifies Mebibytes or Gibibytes,\ + \ respectively.\n" + hugePages: + type: object + description: 'RAM limits for huge pages of the specified init + container + + ' + properties: + hugepages-2Mi: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM limits for huge pages of the specified\ + \ init container with a size of 2Mi. The suffix `Mi`\n\ + \ or `Gi` specifies Mebibytes or Gibibytes, respectively.\n" + hugepages-1Gi: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM limits for huge pages of the specified\ + \ init container with a size of 1Gi. 
The suffix `Mi`\ + \ or `Gi`\n specifies Mebibytes or Gibibytes, respectively.\n" + requests: + type: object + description: "This section allow to configure the resources requests\ + \ for each container and, if not specified, it is filled with\ + \ some defaults based on the fields `.spec.cpu` and `.spec.memory`\ + \ will be set.\n\nOn containerized environments, when running\ + \ production workloads, enforcing container's resources requirements\ + \ requests to be equals to the limits in order to achieve the\ + \ highest level of performance. Doing so, reduces the chances\ + \ of leaving\n the workload with less resources than it requires.\ + \ It also allow to set [static CPU management policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs on the\ + \ node.\n There are cases where you may need to set cpu requests\ + \ to the same value as cpu limits in order to achieve static CPU\ + \ management policy.\n\nBy default the resources requests values\ + \ in fields `.spec.requests.cpu` and `.spec.requests.memory` represent\ + \ the total resources requests assigned to each resource's Pod\ + \ that reference this SGInstanceProfile.\n The resources requests\ + \ of the patroni container (that runs both Patroni and PostgreSQL)\ + \ is calculated by subtracting from the total resources requests\ + \ the resources requests of other containers that are present\ + \ in the Pod.\n To change this behavior and having the resources\ + \ requests values in fields `.spec.requests.cpu` and `.spec.requests.memory`\ + \ to represent the resources requests of the patroni container\ + \ and the total resources requests\n calculated by adding the\ + \ resources requests of all the containers (including the patroni\ + \ container) you may set one or more of the following fields to\ + \ `true`\n (depending on the resource's Pods you need this behaviour\ + \ to be changed):\n \n* `SGCluster.spec.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.coordinator.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.ovewrites.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGDistributedLogs.spec.resources.disableResourcesRequestsSplitFromTotal`\n" + properties: + cpu: + type: string + pattern: ^[1-9][0-9]*[m]?$ + description: "CPU(s) (cores) requests for every resource's Pod\ + \ that reference this SGInstanceProfile. 
The suffix `m`\n\ + \ specifies millicpus (where 1000m is equals to 1).\n\nBy\ + \ default the cpu requests values in field `.spec.requests.cpu`\ + \ represent the total cpu requests assigned to each resource's\ + \ Pod that reference this SGInstanceProfile.\n The cpu requests\ + \ of the patroni container (that runs both Patroni and PostgreSQL)\ + \ is calculated by subtracting from the total cpu requests\ + \ the cpu requests of other containers that are present in\ + \ the Pod.\n To change this behavior and having the cpu requests\ + \ values in field `.spec.requests.cpu` to represent the cpu\ + \ requests of the patroni container and the total cpu requests\n\ + \ calculated by adding the cpu requests of all the containers\ + \ (including the patroni container) you may set one or more\ + \ of the following fields to `true`\n (depending on the resource's\ + \ Pods you need this behaviour to be changed):\n \n* `SGCluster.spec.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.coordinator.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.ovewrites.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGDistributedLogs.spec.resources.disableResourcesRequestsSplitFromTotal`\n" + memory: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM requests for every resource's Pod that reference\ + \ this SGInstanceProfile. The suffix `Mi` or `Gi`\n specifies\ + \ Mebibytes or Gibibytes, respectively.\n\nBy default the\ + \ memory requests values in field `.spec.requests.memory`\ + \ represent the total memory requests assigned to each resource's\ + \ Pod that reference this SGInstanceProfile.\n The memory\ + \ requests of the patroni container (that runs both Patroni\ + \ and PostgreSQL) is calculated by subtracting from the total\ + \ memory requests the memory requests of other containers\ + \ that are present in the Pod.\n To change this behavior and\ + \ having the memory requests values in field `.spec.requests.memory`\ + \ to represent the memory requests of the patroni container\ + \ and the total memory requests\n calculated by adding the\ + \ memory requests of all the containers (including the patroni\ + \ container) you may set one or more of the following fields\ + \ to `true`\n (depending on the resource's Pods you need this\ + \ behaviour to be changed):\n \n* `SGCluster.spec.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.coordinator.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGShardedCluster.spec.shards.ovewrites.pods.resources.disableResourcesRequestsSplitFromTotal`\n\ + * `SGDistributedLogs.spec.resources.disableResourcesRequestsSplitFromTotal`\n" + containers: + type: object + description: 'The CPU(s) (cores) and RAM requests assigned to + containers other than patroni container. 
+ + ' + additionalProperties: + type: object + description: "The CPU(s) (cores) and RAM requests assigned\ + \ to a container.\n\nThis section, if left empty, will be\ + \ filled automatically by the operator with\n some defaults\ + \ that can be proportional to the resources assigned to\ + \ patroni\n container (except for the huge pages that are\ + \ always left untouched).\n" + properties: + cpu: + type: string + pattern: ^[1-9][0-9]*[m]?$ + description: "CPU(s) (cores) requests for the specified\ + \ container. The suffix `m`\n specifies millicpus (where\ + \ 1000m is equals to 1).\n" + memory: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM requests for the specified container.\ + \ The suffix `Mi` or `Gi`\n specifies Mebibytes or\ + \ Gibibytes, respectively.\n" + initContainers: + type: object + description: The CPU(s) (cores) and RAM requests assigned to + init containers. + additionalProperties: + type: object + description: "The CPU(s) (cores) and RAM requests assigned\ + \ to a init container.\n\nThis section will be filled automatically\ + \ by the operator with\n the same values of the resources\ + \ requests assigned to patroni\n container (except for\ + \ the huge pages that are always left untouched).\n" + properties: + cpu: + type: string + pattern: ^[1-9][0-9]*[m]?$ + description: "CPU(s) (cores) requests for the specified\ + \ init container. The suffix\n `m` specifies millicpus\ + \ (where 1000m is equals to 1).\n" + memory: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi)$ + description: "RAM requests for the specified init container.\ + \ The suffix `Mi`\n or `Gi` specifies Mebibytes or\ + \ Gibibytes, respectively.\n" diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgobjectstorages.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgobjectstorages.yaml new file mode 100644 index 00000000000..057f181e302 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgobjectstorages.yaml @@ -0,0 +1,431 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgobjectstorages.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGObjectStorage + listKind: SGObjectStorageList + plural: sgobjectstorages + singular: sgobjectstorage + shortNames: + - sgobjs + versions: + - name: v1beta1 + served: true + storage: true + additionalPrinterColumns: + - name: type + type: string + jsonPath: .spec.type + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + metadata: + type: object + properties: + name: + type: string + description: 'Name of the Object Storage configuration. + + The name must be unique across all object storage configurations + in the same namespace. + + ' + spec: + type: object + description: 'Object Storage configuration + + ' + properties: + type: + type: string + enum: + - s3 + - s3Compatible + - gcs + - azureBlob + description: "Determine the type of object storage used for storing\ + \ the base backups and WAL segments.\n Possible values:\n\ + \ * `s3`: Amazon Web Services S3 (Simple Storage Service).\n\ + \ * `s3Compatible`: non-AWS services that implement a compatibility\ + \ API with AWS S3.\n * `gcs`: Google Cloud Storage.\n \ + \ * `azureBlob`: Microsoft Azure Blob Storage.\n" + s3: + type: object + description: 'Amazon Web Services S3 configuration. + + ' + properties: + bucket: + type: string + pattern: ^((s3|https?)://)?[^/]+(/[^/]*)*$ + description: 'AWS S3 bucket name. 
+ + ' + region: + type: string + description: 'The AWS S3 region. The Region may be detected + using s3:GetBucketLocation, but if you wish to avoid giving + permissions to this API call or forbid it from the applicable + IAM policy, you must then specify this property. + + ' + storageClass: + type: string + description: 'The [Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) + to use for the backup object storage. By default, the `STANDARD` + storage class is used. Other supported values include `STANDARD_IA` + for Infrequent Access and `REDUCED_REDUNDANCY`. + + ' + awsCredentials: + type: object + description: 'The credentials to access AWS S3 for writing and + reading. + + ' + properties: + secretKeySelectors: + type: object + description: 'Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) + to reference the Secrets that contain the information + about the `awsCredentials`. Note that you may use the + same or different Secrets for the `accessKeyId` and the + `secretAccessKey`. In the former case, the `keys` that + identify each must be, obviously, different. + + ' + properties: + accessKeyId: + type: object + description: 'AWS [access key ID](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + For example, `AKIAIOSFODNN7EXAMPLE`. + + ' + properties: + key: + type: string + description: 'The key of the secret to select from. + Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + secretAccessKey: + type: object + description: 'AWS [secret access key](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + For example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`. + + ' + properties: + key: + type: string + description: 'The key of the secret to select from. + Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + required: + - accessKeyId + - secretAccessKey + required: + - secretKeySelectors + required: + - bucket + - awsCredentials + s3Compatible: + type: object + description: AWS S3-Compatible API configuration + properties: + bucket: + type: string + pattern: ^((s3|https?)://)?[^/]+(/[^/]*)*$ + description: 'Bucket name. + + ' + enablePathStyleAddressing: + type: boolean + description: 'Enable path-style addressing (i.e. `http://s3.amazonaws.com/BUCKET/KEY`) + when connecting to an S3-compatible service that lacks support + for sub-domain style bucket URLs (i.e. `http://BUCKET.s3.amazonaws.com/KEY`). + + + Defaults to false. + + ' + endpoint: + type: string + description: 'Overrides the default url to connect to an S3-compatible + service. + + For example: `http://s3-like-service:9000`. + + ' + region: + type: string + description: 'The AWS S3 region. The Region may be detected + using s3:GetBucketLocation, but if you wish to avoid giving + permissions to this API call or forbid it from the applicable + IAM policy, you must then specify this property. + + ' + storageClass: + type: string + description: 'The [Amazon S3 Storage Class](https://aws.amazon.com/s3/storage-classes/) + to use for the backup object storage. 
By default, the `STANDARD` + storage class is used. Other supported values include `STANDARD_IA` + for Infrequent Access and `REDUCED_REDUNDANCY`. + + ' + awsCredentials: + type: object + description: 'The credentials to access AWS S3 for writing and + reading. + + ' + properties: + secretKeySelectors: + type: object + description: 'Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) + to reference the Secret(s) that contain the information + about the `awsCredentials`. Note that you may use the + same or different Secrets for the `accessKeyId` and the + `secretAccessKey`. In the former case, the `keys` that + identify each must be, obviously, different. + + ' + properties: + accessKeyId: + type: object + description: 'AWS [access key ID](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + For example, `AKIAIOSFODNN7EXAMPLE`. + + ' + properties: + key: + type: string + description: 'The key of the secret to select from. + Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + secretAccessKey: + type: object + description: 'AWS [secret access key](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + For example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`. + + ' + properties: + key: + type: string + description: 'The key of the secret to select from. + Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + caCertificate: + type: object + description: 'CA Certificate file to be used when connecting + to the S3 Compatible Service. + + ' + properties: + key: + type: string + description: 'The key of the secret to select from. + Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + required: + - accessKeyId + - secretAccessKey + required: + - secretKeySelectors + required: + - bucket + - awsCredentials + gcs: + type: object + description: 'Google Cloud Storage configuration. + + ' + properties: + bucket: + type: string + pattern: ^(gs://)?[^/]+(/[^/]*)*$ + description: 'GCS bucket name. + + ' + gcpCredentials: + type: object + description: 'The credentials to access GCS for writing and + reading. + + ' + properties: + fetchCredentialsFromMetadataService: + type: boolean + description: 'If true, the credentials will be fetched from + the GCE/GKE metadata service and the field `secretKeySelectors` + have to be set to null or omitted. + + + This is useful when running StackGres inside a GKE cluster + using [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity). + + ' + secretKeySelectors: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + to reference the Secrets that contain the information + about the Service Account to access GCS. + + ' + properties: + serviceAccountJSON: + type: object + description: 'A service account key from GCP. 
In JSON + format, as downloaded from the GCP Console. + + ' + properties: + key: + type: string + description: 'The key of the secret to select from. + Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + required: + - serviceAccountJSON + required: + - bucket + - gcpCredentials + azureBlob: + type: object + description: 'Azure Blob Storage configuration. + + ' + properties: + bucket: + type: string + pattern: ^(azure://)?[^/]+(/[^/]*)*$ + description: 'Azure Blob Storage bucket name. + + ' + azureCredentials: + type: object + description: 'The credentials to access Azure Blob Storage for + writing and reading. + + ' + properties: + secretKeySelectors: + type: object + description: 'Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)(s) + to reference the Secret(s) that contain the information + about the `azureCredentials`. . Note that you may use + the same or different Secrets for the `storageAccount` + and the `accessKey`. In the former case, the `keys` that + identify each must be, obviously, different. + + ' + properties: + storageAccount: + type: object + description: 'The [Storage Account](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview?toc=/azure/storage/blobs/toc.json) + that contains the Blob bucket to be used. + + ' + properties: + key: + type: string + description: 'The key of the secret to select from. + Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + accessKey: + type: object + description: 'The [storage account access key](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal). + + ' + properties: + key: + type: string + description: 'The key of the secret to select from. + Must be a valid secret key. + + ' + name: + type: string + description: 'Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + + ' + required: + - key + - name + required: + - storageAccount + - accessKey + required: + - bucket + - azureCredentials + required: + - type diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgpgconfigs.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgpgconfigs.yaml new file mode 100644 index 00000000000..1075e38e30d --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgpgconfigs.yaml @@ -0,0 +1,97 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgpgconfigs.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGPostgresConfig + listKind: SGPostgresConfigList + plural: sgpgconfigs + singular: sgpgconfig + shortNames: + - sgpgc + - sgpostgresconfig + - sgpostgresconfigs + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: version + type: string + jsonPath: .spec.postgresVersion + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + metadata: + type: object + properties: + name: + type: string + description: 'Name of the Postgres Configuration. 
The configuration + may be referenced by zero or more SGClusters, and if so it would + be referenced by its name. Following [Kubernetes naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 subdomain, that is, up to 253 characters + consisting of one or more lowercase labels separated by `.`. Where + each label is an alphanumeric (a-z, and 0-9) string, with the + `-` character allowed anywhere except the first or last character. + + + The name must be unique across all Postgres configurations in + the same namespace. + + ' + spec: + type: object + properties: + postgresVersion: + type: string + description: 'The **major** Postgres version the configuration is + for. Postgres major versions contain one number starting with + version 10 (`10`, `11`, `12`, etc), and two numbers separated + by a dot for previous versions (`9.6`, `9.5`, etc). + + + Note that Postgres maintains full compatibility across minor versions, + and hence a configuration for a given major version will work + for any minor version of that same major version. + + + Check [StackGres component versions](https://stackgres.io/doc/latest/intro/versions) + to see the Postgres versions supported by this version of StackGres. + + ' + postgresql.conf: + type: object + additionalProperties: + type: string + description: 'The `postgresql.conf` parameters the configuration + contains, represented as an object where the keys are valid names + for the `postgresql.conf` configuration file parameters of the + given `postgresVersion`. You may check [postgresqlco.nf](https://postgresqlco.nf) + as a reference on how to tune and find the valid parameters for + a given major version. + + ' + required: + - postgresVersion + - postgresql.conf + status: + type: object + properties: + defaultParameters: + type: object + additionalProperties: + type: string + description: 'The `postgresql.conf` default parameters which are + used if not set. + + ' + required: + - defaultParameters diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgpoolconfigs.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgpoolconfigs.yaml new file mode 100644 index 00000000000..4d72a0a8dc7 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgpoolconfigs.yaml @@ -0,0 +1,129 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgpoolconfigs.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGPoolingConfig + listKind: SGPoolingConfigList + plural: sgpoolconfigs + singular: sgpoolconfig + shortNames: + - sgpoc + - sgpoolingconfig + - sgpoolingconfigs + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + metadata: + type: object + properties: + name: + type: string + description: 'Name of the Connection Pooling Configuration. The + configuration may be referenced by zero or more SGClusters, and + if so it would be referenced by its name. Following [Kubernetes + naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 subdomain, that is, up to 253 characters + consisting of one or more lowercase labels separated by `.`. Where + each label is an alphanumeric (a-z, and 0-9) string, with the + `-` character allowed anywhere except the first or last character. 
+ + + The name must be unique across all Connection Pooling configurations + in the same namespace. + + ' + spec: + type: object + properties: + pgBouncer: + type: object + description: 'Connection pooling configuration based on PgBouncer. + + ' + properties: + pgbouncer.ini: + type: object + description: 'The `pgbouncer.ini` parameters the configuration + contains, represented as an object where the keys are valid + names for the `pgbouncer.ini` configuration file parameters. + + + Check [pgbouncer configuration](https://www.pgbouncer.org/config.html#generic-settings) + for more information about supported parameters. + + ' + properties: + pgbouncer: + type: object + additionalProperties: true + description: 'The `pgbouncer.ini` (Section [pgbouncer]) + parameters the configuration contains, represented as + an object where the keys are valid names for the `pgbouncer.ini` + configuration file parameters. + + + Check [pgbouncer configuration](https://www.pgbouncer.org/config.html#generic-settings) + for more information about supported parameters + + ' + databases: + type: object + additionalProperties: + type: object + additionalProperties: true + description: 'The `pgbouncer.ini` (Section [databases]) + parameters the configuration contains, represented as + an object where the keys are valid names for the `pgbouncer.ini` + configuration file parameters. + + + Check [pgbouncer configuration](https://www.pgbouncer.org/config.html#section-databases) + for more information about supported parameters. + + ' + users: + type: object + additionalProperties: + type: object + additionalProperties: true + description: 'The `pgbouncer.ini` (Section [users]) parameters + the configuration contains, represented as an object where + the keys are valid names for the `pgbouncer.ini` configuration + file parameters. + + + Check [pgbouncer configuration](https://www.pgbouncer.org/config.html#section-users) + for more information about supported parameters. + + ' + status: + type: object + properties: + pgBouncer: + type: object + description: 'Connection pooling configuration status based on PgBouncer. + + ' + properties: + defaultParameters: + type: object + additionalProperties: + type: string + description: 'The `pgbouncer.ini` default parameters parameters + which are used if not set. + + ' + required: + - defaultParameters diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgscripts.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgscripts.yaml new file mode 100644 index 00000000000..8ae6a90e012 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgscripts.yaml @@ -0,0 +1,236 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgscripts.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGScript + listKind: SGScriptList + plural: sgscripts + singular: sgscript + shortNames: + - sgscr + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: scripts + type: string + jsonPath: .spec.scripts.length + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 52 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + description: 'Name of the StackGres script. 
Following [Kubernetes + naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 subdomain, that is, up to 253 characters + consisting of one or more lowercase labels separated by `.`. Where + each label is an alphanumeric (a-z, and 0-9) string, with the + `-` character allowed anywhere except the first or last character. + + + The name must be unique across all StackGres scripts in the same + namespace. The full script name includes the namespace in which + the script is created. + + ' + spec: + type: object + properties: + managedVersions: + type: boolean + description: 'If `true` the versions will be managed by the operator + automatically. The user will still be able to update them if needed. + `true` by default. + + ' + continueOnError: + type: boolean + description: 'If `true`, when any script entry fail will not prevent + subsequent script entries from being executed. `false` by default. + + ' + scripts: + type: array + description: 'A list of SQL scripts. + + ' + x-kubernetes-list-map-keys: + - id + x-kubernetes-list-type: map + items: + type: object + description: 'Scripts are executed in auto-commit mode with the + user `postgres` in the specified database (or in database `postgres` + if not specified). + + + Fields `script` and `scriptFrom` are mutually exclusive and + only one of them is required. + + ' + properties: + name: + type: string + description: 'Name of the script. Must be unique across this + SGScript. + + ' + id: + type: integer + default: -1 + description: 'The id is immutable and must be unique across + all the script entries. It is replaced by the operator and + is used to identify the script for the whole life of the + `SGScript` object. + + ' + version: + type: integer + description: 'Version of the script. It will allow to identify + if this script entry has been changed. + + ' + database: + type: string + description: 'Database where the script is executed. Defaults + to the `postgres` database, if not specified. + + ' + user: + type: string + description: 'User that will execute the script. Defaults + to the `postgres` user. + + ' + wrapInTransaction: + type: string + description: 'Wrap the script in a transaction using the specified + transaction mode: + + + * `read-committed`: The script will be wrapped in a transaction + using [READ COMMITTED](https://www.postgresql.org/docs/current/transaction-iso.html#XACT-READ-COMMITTED) + isolation level. + + * `repeatable-read`: The script will be wrapped in a transaction + using [REPEATABLE READ](https://www.postgresql.org/docs/current/transaction-iso.html#XACT-REPEATABLE-READ) + isolation level. + + * `serializable`: The script will be wrapped in a transaction + using [SERIALIZABLE](https://www.postgresql.org/docs/current/transaction-iso.html#XACT-SERIALIZABLE) + isolation level. + + + If not set the script entry will not be wrapped in a transaction + + ' + storeStatusInDatabase: + type: boolean + description: "When set to `true` the script entry execution\ + \ will include storing the status of the execution of this\n\ + \ script entry in the table `managed_sql.status` that will\ + \ be created in the specified `database`. 
This\n will avoid\ + \ an operation that fails partially to be unrecoverable\ + \ requiring the intervention from the user\n if user in\ + \ conjunction with `retryOnError`.\n\nIf set to `true` then\ + \ `wrapInTransaction` field must be set.\n\nThis is `false`\ + \ by default.\n" + retryOnError: + type: boolean + description: "If not set or set to `false` the script entry\ + \ will not be retried if it fails.\n\nWhen set to `true`\ + \ the script execution will be retried with an exponential\ + \ backoff of 5 minutes,\n starting from 10 seconds and\ + \ a standard deviation of 10 seconds.\n\nThis is `false`\ + \ by default.\n" + script: + type: string + description: 'Raw SQL script to execute. This field is mutually + exclusive with `scriptFrom` field. + + ' + scriptFrom: + type: object + description: 'Reference to either a Kubernetes [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) + or a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + that contains the SQL script to execute. This field is mutually + exclusive with `script` field. + + + Fields `secretKeyRef` and `configMapKeyRef` are mutually + exclusive, and one of them is required. + + ' + properties: + secretKeyRef: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the SQL script to execute. This field + is mutually exclusive with `configMapKeyRef` field. + + ' + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + configMapKeyRef: + type: object + description: 'A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + reference that contains the SQL script to execute. This + field is mutually exclusive with `secretKeyRef` field. + + ' + properties: + name: + type: string + description: 'The name of the ConfigMap that contains + the SQL script to execute. + + ' + key: + type: string + description: 'The key name within the ConfigMap that + contains the SQL script to execute. + + ' + status: + type: object + properties: + scripts: + type: array + description: 'A list of script entry statuses where a script entry + under `.spec.scripts` is identified by the `id` field. + + ' + items: + type: object + properties: + id: + type: integer + description: 'The id that identifies a script entry. + + ' + hash: + type: string + description: 'The hash of a ConfigMap or Secret referenced + with the associated script entry. 
+ + ' diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgshardedbackups.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgshardedbackups.yaml new file mode 100644 index 00000000000..c7f5f5e1fcf --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgshardedbackups.yaml @@ -0,0 +1,211 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgshardedbackups.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGShardedBackup + listKind: SGShardedBackupList + plural: sgshardedbackups + singular: sgshardedbackup + shortNames: + - sgsbk + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: cluster + type: string + jsonPath: .spec.sgShardedCluster + - name: managed + type: string + jsonPath: .spec.managedLifecycle + - name: status + type: string + jsonPath: .status.process.status + - name: pg-version + type: string + jsonPath: .status.backupInformation.postgresVersion + priority: 1 + - name: compressed-size + type: integer + format: byte + jsonPath: .status.backupInformation.size.compressed + priority: 1 + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + description: "A manual or automatically generated sharded backup of an SGCluster\ + \ configured with an SGBackupConfig.\n\nWhen a SGBackup is created a Job\ + \ will perform a full sharded backup of the database and update the status\ + \ of the SGBackup\n with the all the information required to restore it\ + \ and some stats (or a failure message in case something unexpected\n\ + \ happened).\nAfter an SGBackup is created the same Job performs a reconciliation\ + \ of the sharded backups by applying the retention window\n that has been\ + \ configured in the SGBackupConfig and removing the sharded backups with\ + \ managed lifecycle and the WAL files older\n than the ones that fit in\ + \ the retention window. The reconciliation also removes sharded backups\ + \ (excluding WAL files) that do\n not belongs to any SGBackup. If the\ + \ target storage of the SGBackupConfig is changed deletion of an SGBackup\ + \ sharded backups\n with managed lifecycle and the WAL files older than\ + \ the ones that fit in the retention window and of sharded backups that\ + \ do\n not belongs to any SGBackup will not be performed anymore on the\ + \ previous storage, only on the new target storage.\n" + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 56 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + description: 'Name of the sharded backup. Following [Kubernetes + naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 subdomain, that is, up to 253 characters + consisting of one or more lowercase labels separated by `.`. Where + each label is an alphanumeric (a-z, and 0-9) string, with the + `-` character allowed anywhere except the first or last character. + + + The name must be unique across all StackGres sharded backups in + the same namespace." + + ' + spec: + type: object + properties: + sgShardedCluster: + type: string + description: "The name of the `SGShardedCluster` from which this\ + \ sharded backup is/will be taken.\n\nIf this is a copy of an\ + \ existing completed sharded backup in a different namespace\n\ + \ the value must be prefixed with the namespace of the source\ + \ backup and a\n dot `.` (e.g. 
`.`) or have the same value\n if the source sharded\ + \ backup is also a copy.\n" + managedLifecycle: + type: boolean + description: "Indicate if this sharded backup is permanent and should\ + \ not be removed by the automated\n retention policy. Default\ + \ is `false`.\n" + timeout: + type: integer + description: 'Allow to set a timeout for the backup creation. + + + If not set it will be disabled and the backup operation will continue + until the backup completes or fail. If set to 0 is the same as + not being set. + + + Make sure to set a reasonable high value in order to allow for + any unexpected delays during backup creation (network low bandwidth, + disk low throughput and so forth). + + ' + reconciliationTimeout: + type: integer + default: 300 + description: "Allow to set a timeout for the reconciliation process\ + \ that take place after the backup.\n\nIf not set defaults to\ + \ 300 (5 minutes). If set to 0 it will disable timeout.\n\nFailure\ + \ of reconciliation will not make the backup fail and will be\ + \ re-tried the next time a SGBackup\n or shecduled backup Job\ + \ take place.\n" + maxRetries: + type: integer + description: 'The maximum number of retries the backup operation + is allowed to do after a failure. + + + A value of `0` (zero) means no retries are made. Defaults to: + `3`. + + ' + status: + type: object + properties: + sgBackups: + type: array + description: 'The list of SGBackups that compose the SGShardedBackup + used to restore the sharded cluster. + + ' + items: + type: string + description: 'One of the SGBackups that compose the SGShardedBackup + used to restore the sharded cluster. + + ' + process: + type: object + properties: + status: + type: string + description: 'Status of the sharded backup. + + ' + failure: + type: string + description: 'If the status is `failed` this field will contain + a message indicating the failure reason. + + ' + jobPod: + type: string + description: 'Name of the pod assigned to the sharded backup. + StackGres utilizes internally a locking mechanism based on + the pod name of the job that creates the sharded backup. + + ' + timing: + type: object + properties: + start: + type: string + description: 'Start time of sharded backup. + + ' + end: + type: string + description: 'End time of sharded backup. + + ' + stored: + type: string + description: 'Time at which the sharded backup is safely + stored in the object storage. + + ' + backupInformation: + type: object + properties: + postgresVersion: + type: string + description: 'Postgres version of the server where the sharded + backup is taken from. + + ' + size: + type: object + properties: + uncompressed: + type: integer + format: int64 + description: 'Size (in bytes) of the uncompressed sharded + backup. + + ' + compressed: + type: integer + format: int64 + description: 'Size (in bytes) of the compressed sharded + backup. 
+ + ' diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgshardedclusters.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgshardedclusters.yaml new file mode 100644 index 00000000000..7781aa52005 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgshardedclusters.yaml @@ -0,0 +1,27639 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgshardedclusters.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGShardedCluster + listKind: SGShardedClusterList + plural: sgshardedclusters + singular: sgshardedcluster + shortNames: + - sgscl + versions: + - name: v1alpha1 + served: true + storage: true + additionalPrinterColumns: + - name: version + type: string + jsonPath: .spec.postgres.version + - name: instances + type: integer + jsonPath: .spec.coordinator.instances + (.spec.shards.clusters * .spec.shards.instancesPerCluster) + - name: Profile + type: string + jsonPath: .spec.coordinator.sgInstanceProfile + ", " + .spec.shards.sgInstanceProfile + - name: Disk + type: string + jsonPath: .spec.coordinator.pods.persistentVolume.size + ", " + .spec.shards.pods.persistentVolume.size + - name: prometheus-autobind + type: string + jsonPath: .spec.configurations.observability.prometheusAutobind + priority: 1 + - name: pool-config + type: string + jsonPath: .spec.coordinator.configurations.sgPoolingConfig + ", " + .spec.shards.configurations.sgPoolingConfig + priority: 1 + - name: postgres-config + type: string + jsonPath: .spec.coordinator.configurations.sgPostgresConfig + ", " + .spec.shards.configurations.sgPostgresConfig + priority: 1 + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 37 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + description: 'Name of the StackGres sharded cluster. Following [Kubernetes + naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 subdomain, that is, up to 253 characters + consisting of one or more lowercase labels separated by `.`. Where + each label is an alphanumeric (a-z, and 0-9) string, with the + `-` character allowed anywhere except the first or last character. + + + The name must be unique across all SGCluster, SGShardedCluster + and SGDistributedLogs in the same namespace. + + ' + spec: + type: object + description: 'Specification of the desired behavior of a StackGres sharded + cluster. 
+ + ' + required: + - database + - postgres + - coordinator + - shards + properties: + profile: + type: string + description: "The profile allows to change in a convenient place\ \ a set of configuration defaults that affect how the cluster\ \ is generated.\n\nAll those defaults can be overwritten by setting\ \ the corresponding fields.\n\nAvailable profiles are:\n\n* `production`:\n\ \n Prevents two Pods from running in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ \ to `false` by default).\n Sets both limits and requests using\ \ `SGInstanceProfile` for `patroni` container that runs both Patroni\ \ and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ \ to `false` by default).\n Sets requests using the referenced\ \ `SGInstanceProfile` for sidecar containers other than `patroni`\ \ (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ \ to `false` by default).\n\n* `testing`:\n\n Allows two Pods\ \ to run in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ \ to `true` by default).\n Sets both limits and requests using\ \ `SGInstanceProfile` for `patroni` container that runs both Patroni\ \ and Postgres (set `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ \ to `false` by default).\n Sets requests using the referenced\ \ `SGInstanceProfile` for sidecar containers other than `patroni`\ \ (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ \ to `false` by default).\n\n* `development`:\n\n Allows two\ \ Pods to run in the same Node (set `.spec.nonProductionOptions.disableClusterPodAntiAffinity`\ \ to `true` by default).\n Unsets both limits and requests for\ \ `patroni` container that runs both Patroni and Postgres (set\ \ `.spec.nonProductionOptions.disablePatroniResourceRequirements`\ \ to `true` by default).\n Unsets requests for sidecar containers\ \ other than `patroni` (set `.spec.nonProductionOptions.disableClusterResourceRequirements`\ \ to `true` by default).\n\n**Changing this field may require\ \ a restart.**\n" + default: production + type: + type: string + description: 'The sharding technology that will be used for the + sharded cluster. + + + Available technologies are: + + + * `citus` + + * `ddp` + + * `shardingsphere` + + + **Citus** + + + Citus is a PostgreSQL extension that transforms Postgres into + a distributed database—so you can achieve high performance at + any scale. + + + See also https://github.com/citusdata/citus + + + **DDP** + + + DDP (Distributed Data Partitioning) allows you to distribute data + across different physical nodes to improve the query performance + of high data volumes, taking advantage of distinct nodes’ resources. + It uses an entry point named coordinator that is in charge of sending/distributing + the queries to different nodes named shards. + + + **ShardingSphere** + + + Apache ShardingSphere is an ecosystem to transform any database + into a distributed database system, and enhance it with sharding, + elastic scaling, encryption features & more. + + + The StackGres implementation of ShardingSphere as a sharding technology + uses the [ShardingSphere Proxy](https://shardingsphere.apache.org/document/current/en/quick-start/shardingsphere-proxy-quick-start/) + as an entry point to distribute SQL traffic among the shards. 
+ + + This implementation requires the [ShardingSphere Operator](https://shardingsphere.apache.org/oncloud/current/en/user-manual/cn-sn-operator/) + to be installed and will create a ComputeNode + + ' + database: + type: string + description: 'The database name that will be created and used across + all nodes and where "partitioned" (distributed) tables will live. + + ' + postgres: + type: object + description: 'This section allows to configure Postgres features + + ' + required: + - version + properties: + version: + type: string + description: 'Postgres version used on the cluster. It is either + of: + + * The string ''latest'', which automatically sets the latest + major.minor Postgres version. + + * A major version, like ''14'' or ''13'', which sets that + major version and the latest minor version. + + * A specific major.minor version, like ''14.4''. + + ' + flavor: + type: string + description: 'Postgres flavor used on the cluster. It is either + of: + + * `babelfish` will use the [Babelfish for Postgres](https://babelfish-for-postgresql.github.io/babelfish-for-postgresql/). + + + If not specified then the vanilla Postgres will be used for + the cluster. + + + **This field can only be set on creation.** + + ' + extensions: + type: array + description: "StackGres supports deploying extensions at runtime\ \ by simply adding an entry to this array. A deployed extension still\nrequires creation in a database using the [`CREATE\ \ EXTENSION`](https://www.postgresql.org/docs/current/sql-createextension.html)\nstatement. After an extension is deployed correctly it will\ \ be present until removed and the cluster restarted.\n\n\ A cluster restart is required for:\n* Extensions that require\ \ adding an entry to the [`shared_preload_libraries`](https://postgresqlco.nf/en/doc/param/shared_preload_libraries/)\ \ configuration parameter.\n* Upgrading extensions that overwrite\ \ any file that is not the extension''s control file or extension''s\ \ script file.\n* Removing extensions. Until the cluster is\ \ restarted a removed extension will still be available.\n\ * Installation of extensions that require an extra mount. After installation\ \ the cluster will need to be restarted.\n\n**Example:**\n\ \n``` yaml\napiVersion: stackgres.io/v1alpha1\nkind: SGShardedCluster\n\ metadata:\n name: stackgres\nspec:\n postgres:\n extensions:\n\ \ - {name: 'timescaledb', version: '2.3.1'}\n```\n" + items: + type: object + required: + - name + properties: + name: + type: string + description: The name of the extension to deploy. + publisher: + type: string + description: The id of the publisher of the extension + to deploy. If not specified `com.ongres` will be used + by default. + version: + type: string + description: The version of the extension to deploy. If + not specified the version of the `stable` channel will be used + by default. + repository: + type: string + description: 'The repository base URL from where to obtain + the extension to deploy. 
+ + + **This section is filled by the operator.** + + ' + ssl: + type: object + description: "This section allows to use SSL when connecting\ + \ to Postgres\n\n**Example:**\n\n```yaml\napiVersion: stackgres.io/v1alpha1\n\ + kind: SGShardedCluster\nmetadata:\n name: stackgres\nspec:\n\ + \ postgres:\n ssl:\n enabled: true\n certificateSecretKeySelector:\n\ + \ name: stackgres-secrets\n key: cert\n \ + \ privateKeySecretKeySelector:\n name: stackgres-secrets\n\ + \ key: key\n```\n" + properties: + enabled: + type: boolean + description: 'Allow to enable SSL for connections to Postgres. + By default is `true`. + + + If `true` certificate and private key will be auto-generated + unless fields `certificateSecretKeySelector` and `privateKeySecretKeySelector` + are specified. + + ' + certificateSecretKeySelector: + type: object + description: 'Secret key selector for the certificate or + certificate chain used for SSL connections. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The name of Secret that contains the certificate + or certificate chain for SSL connections + + ' + key: + type: string + description: 'The key of Secret that contains the certificate + or certificate chain for SSL connections + + ' + privateKeySecretKeySelector: + type: object + description: 'Secret key selector for the private key used + for SSL connections. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The name of Secret that contains the private + key for SSL connections + + ' + key: + type: string + description: 'The key of Secret that contains the private + key for SSL connections + + ' + replication: + type: object + description: "This section allows to configure the global Postgres\ + \ replication mode.\n\nThe main replication group is implicit\ + \ and contains the total number of instances less the sum of all\n\ + \ instances in other replication groups.\n\nThe total number\ + \ of instances is always specified by `.spec.instances`.\n" + properties: + mode: + type: string + description: "The replication mode applied to the whole cluster.\n\ + Possible values are:\n* `async` (default)\n* `sync`\n* `strict-sync`\n\ + * `sync-all`\n* `strict-sync-all`\n\n**async**\n\nWhen in\ + \ asynchronous mode the cluster is allowed to lose some committed\ + \ transactions.\n When the primary server fails or becomes\ + \ unavailable for any other reason a sufficiently healthy\ + \ standby\n will automatically be promoted to primary. Any\ + \ transactions that have not been replicated to that standby\n\ + \ remain in a \"forked timeline\" on the primary, and are\ + \ effectively unrecoverable (the data is still there,\n but\ + \ recovering it requires a manual recovery effort by data\ + \ recovery specialists).\n\n**sync**\n\nWhen in synchronous\ + \ mode a standby will not be promoted unless it is certain\ + \ that the standby contains all\n transactions that may have\ + \ returned a successful commit status to client (clients can\ + \ change the behavior\n per transaction using PostgreSQL’s\ + \ `synchronous_commit` setting. Transactions with `synchronous_commit`\n\ + \ values of `off` and `local` may be lost on fail over, but\ + \ will not be blocked by replication delays). This\n means\ + \ that the system may be unavailable for writes even though\ + \ some servers are available. 
System\n administrators can\ \ still use manual failover commands to promote a standby\ \ even if it results in transaction\n loss.\n\nSynchronous\ \ mode does not guarantee multi node durability of commits\ \ under all circumstances. When no suitable\n standby is\ \ available, primary server will still accept writes, but\ \ does not guarantee their replication. When\n the primary\ \ fails in this mode no standby will be promoted. When the\ \ host that used to be the primary comes\n back it will get\ \ promoted automatically, unless system administrator performed\ \ a manual failover. This behavior\n makes synchronous mode\ \ usable with 2 node clusters.\n\nWhen synchronous mode is\ \ used and a standby crashes, commits will block until the\ \ primary is switched to standalone\n mode. Manually shutting\ \ down or restarting a standby will not cause a commit service\ \ interruption. Standby will\n signal the primary to release\ \ itself from synchronous standby duties before PostgreSQL\ \ shutdown is initiated.\n\n**strict-sync**\n\nWhen it is\ \ absolutely necessary to guarantee that each write is stored\ \ durably on at least two nodes, use the strict\n synchronous\ \ mode. This mode prevents synchronous replication from being switched\ \ off on the primary when no synchronous\n standby candidates\ \ are available. As a downside, the primary will not be available\ \ for writes (unless the Postgres\n transaction explicitly\ \ turns off the `synchronous_mode` parameter), blocking all client\ \ write requests until at least one\n synchronous replica\ \ comes up.\n\n**Note**: Because of the way synchronous replication\ \ is implemented in PostgreSQL it is still possible to lose\n\ \ transactions even when using strict synchronous mode. If\ \ the PostgreSQL backend is cancelled while waiting to acknowledge\n\ \ replication (as a result of packet cancellation due to\ \ client timeout or backend failure) transaction changes become\n\ \ visible for other backends. Such changes are not yet replicated\ \ and may be lost in case of standby promotion.\n\n**sync-all**\n\ \nThe same as `sync` but `syncInstances` is ignored and the\ \ number of synchronous instances is equal to the total number\n\ \ of instances less one.\n\n**strict-sync-all**\n\nThe same\ \ as `strict-sync` but `syncInstances` is ignored and the\ \ number of synchronous instances is equal to the total number\n\ \ of instances less one.\n" + default: async + syncInstances: + type: integer + minimum: 1 + description: "Number of synchronous standby instances. Must\ \ be less than the total number of instances. It is set to\ \ 1 by default.\n Only settable if mode is `sync` or `strict-sync`.\n" + initialization: + type: object + description: 'Allow to specify how the replicas are initialized. + + ' + properties: + mode: + type: string + description: "Allow to specify how the replicas are initialized.\n\nPossible values are:\n\n* `FromPrimary`: When this mode\ \ is used replicas will always be created from the primary\ \ using `pg_basebackup`.\n* `FromReplica`: When this mode\ \ is used replicas will be created from another existing\ \ replica using\n `pg_basebackup`. Falls back to `FromPrimary`\ \ if there's no replica or it fails.\n* `FromExistingBackup`:\ \ When this mode is used replicas will be created from\ \ an existing SGBackup. If `backupNewerThan` is set\n\ \ the SGBackup must be newer than its value. 
When this\ \ mode fails to restore an SGBackup it will try with a\ \ previous one (if one exists).\n Falls back to `FromReplica`\ \ if there's no backup left or it fails.\n* `FromNewlyCreatedBackup`:\ \ When this mode is used replicas will be created from\ \ a newly created SGBackup.\n Falls back to `FromExistingBackup`\ \ if `backupNewerThan` is set and a recent backup\ \ newer than its value exists or it fails.\n" + default: FromExistingBackup + backupNewerThan: + type: string + description: "An ISO 8601 duration in the format `PnDTnHnMn.nS`,\ \ that specifies how old an SGBackup has to be in order\ \ to be selected\n to initialize a replica.\n\nWhen `FromExistingBackup`\ \ mode is set this field restricts the selection of the SGBackup\ \ to be used for recovery to those newer than the\n specified value.\ \ \n\nWhen `FromNewlyCreatedBackup` mode is set this field\ \ skips the creation of the SGBackup to be used for recovery if\ \ one newer than\n the specified value exists. \n" + backupRestorePerformance: + type: object + description: 'Configuration that affects the backup network + and disk usage performance during recovery. + + ' + properties: + maxNetworkBandwidth: + type: integer + description: 'Maximum storage upload bandwidth used + when storing a backup. In bytes (per second). + + ' + maxDiskBandwidth: + type: integer + description: 'Maximum disk read I/O when performing + a backup. In bytes (per second). + + ' + downloadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to read the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to the minimum between the number of files + to read and 10. + + ' + postgresServices: + type: object + description: Kubernetes [services](https://kubernetes.io/docs/concepts/services-networking/service/) + created or managed by StackGres. + properties: + coordinator: + type: object + description: 'Configuration for the coordinator services + + ' + properties: + any: + type: object + description: 'Configure the coordinator service to any instance + of the coordinator with the same name as the SGShardedCluster + plus the `-reads` suffix. + + + If the sharding type is `shardingsphere` then the name + of the service will be the same name as the SGShardedCluster. + + + It provides a stable connection (regardless of node failures) + to any Postgres server of the coordinator cluster. Servers + are load-balanced via this service. + + + See also https://kubernetes.io/docs/concepts/services-networking/service/ + + ' + properties: + enabled: + type: boolean + description: Specify if the service should be created + or not. + type: + type: string + enum: + - ClusterIP + - LoadBalancer + - NodePort + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid + + options are ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates + + a cluster-internal IP address for load-balancing to + endpoints. + + "NodePort" builds on ClusterIP and allocates a port + on every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current + cloud). + + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if + NodePorts will be automatically allocated for services + with type LoadBalancer. Default is "true". 
It may + be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific + NodePorts (by specifying a value), those requests + will be respected, regardless of this field. This + field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any + other type. + type: boolean + externalIPs: + description: 'externalIPs is a list of IP addresses + for which nodes in the cluster will also accept traffic + for this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes + system. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#allocateloadbalancernodeports-v1-core' + items: + type: string + type: array + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes + distribute service traffic they receive on one of + the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", + the proxy will configure the service in a way that + assumes that external load balancers will take care + of balancing the service traffic between nodes, and + so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the + client source IP. (Traffic mistakenly sent to a node + with no endpoints will be dropped.) The default value, + "Cluster", uses the standard behavior of routing to + all endpoints evenly (possibly modified by topology + and other features). Note that traffic sent to an + External IP or LoadBalancer IP from within the cluster + will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to + take traffic policy into account when picking a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is + set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, + a value will be automatically allocated. External + systems (e.g. load-balancers) can use this port to + determine if a given node holds endpoints for this + service or not. If this field is specified when creating + a Service which does not need it, creation will fail. + This field will be wiped when updating a Service to + no longer need it (e.g. changing type). This field + cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes + distribute service traffic they receive on the ClusterIP. + If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the + same node as the pod, dropping the traffic if there + are no local endpoints. The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + type: string + ipFamilies: + description: 'IPFamilies is a list of IP families (e.g. + IPv4, IPv6) assigned to this service. This field is + usually assigned automatically based on cluster configuration + and the ipFamilyPolicy field. 
If this field is specified + manually, the requested family is available in the + cluster, and ipFamilyPolicy allows it, it will be + used; otherwise creation of the service will fail. + This field is conditionally mutable: it allows for + adding or removing a secondary IP family, but it does + not allow changing the primary IP family of the Service. + Valid values are "IPv4" and "IPv6". This field only + applies to Services of types ClusterIP, NodePort, + and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to + type ExternalName. + + + This field may hold a maximum of two entries (dual-stack + families, in either order). These families must correspond + to the values of the clusterIPs field, if specified. + Both clusterIPs and ipFamilies are governed by the + ipFamilyPolicy field.' + items: + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is + no value provided, then this field will be set to + SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters), or "RequireDualStack" (two + IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend + on the value of this field. This field will be wiped + when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If + specified, the value of this field must be a label-style + identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are + reserved for end-users. This field can only be set + when the Service type is 'LoadBalancer'. If not set, + the default load balancer implementation is used, + today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any + default load balancer implementation (e.g. cloud providers) + should ignore Services that set this field. This field + can only be set when creating or updating a Service + to type 'LoadBalancer'. Once set, it can not be changed. + This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load + balancer is created. This field will be ignored if + the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies + across implementations. Using it is non-portable and + it may not support dual-stack. Users are encouraged + to use implementation-specific annotations when available.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified + client IPs. This field will be ignored if the cloud-provider + does not support the feature." 
More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that + any agent which deals with endpoints for this Service + should disregard any indications of ready/not-ready. + The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV + DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints + and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" + even if the Pods themselves are not. Agents which + consume only Kubernetes generated endpoints through + the Endpoints or EndpointSlice resources can safely + assume this behavior. + type: boolean + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to + maintain session affinity. Enable client IP based + session affinity. Must be ClientIP or None. Defaults + to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: 'SessionAffinityConfig represents the configurations + of session affinity. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#sessionaffinityconfig-v1-core' + properties: + clientIP: + description: ClientIPConfig represents the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The + value must be >0 && <=86400(for 1 day) if + ServiceAffinity == "ClientIP". Default value + is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + nodePorts: + type: object + description: nodePorts is a list of ports for exposing + a cluster services to the outside world + properties: + pgport: + type: integer + description: the node port that will be exposed + to connect to Postgres instance + replicationport: + type: integer + description: the node port that will be exposed + to connect to Postgres instance for replication + purpose + primary: + type: object + description: 'Configure the coordinator service to the primary + of the coordinator with the name as the SGShardedCluster. + + + If the sharding type is `shardingsphere` then this service + will be disabled. + + + It provides a stable connection (regardless of primary + failures or switchovers) to the read-write Postgres server + of the coordinator cluster. + + + See also https://kubernetes.io/docs/concepts/services-networking/service/ + + ' + properties: + enabled: + type: boolean + description: Specify if the service should be created + or not. + type: + type: string + enum: + - ClusterIP + - LoadBalancer + - NodePort + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid + + options are ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates + + a cluster-internal IP address for load-balancing to + endpoints. + + "NodePort" builds on ClusterIP and allocates a port + on every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current + cloud). 
+ + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if + NodePorts will be automatically allocated for services + with type LoadBalancer. Default is "true". It may + be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific + NodePorts (by specifying a value), those requests + will be respected, regardless of this field. This + field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any + other type. + type: boolean + externalIPs: + description: 'externalIPs is a list of IP addresses + for which nodes in the cluster will also accept traffic + for this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes + system. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#allocateloadbalancernodeports-v1-core' + items: + type: string + type: array + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes + distribute service traffic they receive on one of + the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", + the proxy will configure the service in a way that + assumes that external load balancers will take care + of balancing the service traffic between nodes, and + so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the + client source IP. (Traffic mistakenly sent to a node + with no endpoints will be dropped.) The default value, + "Cluster", uses the standard behavior of routing to + all endpoints evenly (possibly modified by topology + and other features). Note that traffic sent to an + External IP or LoadBalancer IP from within the cluster + will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to + take traffic policy into account when picking a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is + set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, + a value will be automatically allocated. External + systems (e.g. load-balancers) can use this port to + determine if a given node holds endpoints for this + service or not. If this field is specified when creating + a Service which does not need it, creation will fail. + This field will be wiped when updating a Service to + no longer need it (e.g. changing type). This field + cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes + distribute service traffic they receive on the ClusterIP. + If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the + same node as the pod, dropping the traffic if there + are no local endpoints. The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + type: string + ipFamilies: + description: 'IPFamilies is a list of IP families (e.g. 
+ IPv4, IPv6) assigned to this service. This field is + usually assigned automatically based on cluster configuration + and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the + cluster, and ipFamilyPolicy allows it, it will be + used; otherwise creation of the service will fail. + This field is conditionally mutable: it allows for + adding or removing a secondary IP family, but it does + not allow changing the primary IP family of the Service. + Valid values are "IPv4" and "IPv6". This field only + applies to Services of types ClusterIP, NodePort, + and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to + type ExternalName. + + + This field may hold a maximum of two entries (dual-stack + families, in either order). These families must correspond + to the values of the clusterIPs field, if specified. + Both clusterIPs and ipFamilies are governed by the + ipFamilyPolicy field.' + items: + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is + no value provided, then this field will be set to + SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters), or "RequireDualStack" (two + IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend + on the value of this field. This field will be wiped + when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If + specified, the value of this field must be a label-style + identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are + reserved for end-users. This field can only be set + when the Service type is 'LoadBalancer'. If not set, + the default load balancer implementation is used, + today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any + default load balancer implementation (e.g. cloud providers) + should ignore Services that set this field. This field + can only be set when creating or updating a Service + to type 'LoadBalancer'. Once set, it can not be changed. + This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load + balancer is created. This field will be ignored if + the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies + across implementations. Using it is non-portable and + it may not support dual-stack. Users are encouraged + to use implementation-specific annotations when available.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified + client IPs. This field will be ignored if the cloud-provider + does not support the feature." 
More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that + any agent which deals with endpoints for this Service + should disregard any indications of ready/not-ready. + The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV + DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints + and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" + even if the Pods themselves are not. Agents which + consume only Kubernetes generated endpoints through + the Endpoints or EndpointSlice resources can safely + assume this behavior. + type: boolean + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to + maintain session affinity. Enable client IP based + session affinity. Must be ClientIP or None. Defaults + to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: 'SessionAffinityConfig represents the configurations + of session affinity. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#sessionaffinityconfig-v1-core' + properties: + clientIP: + description: ClientIPConfig represents the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The + value must be >0 && <=86400(for 1 day) if + ServiceAffinity == "ClientIP". Default value + is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + nodePorts: + type: object + description: nodePorts is a list of ports for exposing + a cluster services to the outside world + properties: + pgport: + type: integer + description: the node port that will be exposed + to connect to Postgres instance + replicationport: + type: integer + description: the node port that will be exposed + to connect to Postgres instance for replication + purpose + customPorts: + type: array + description: "The list of custom ports that will be exposed\ + \ by the coordinator services.\n\nThe names of custom\ + \ ports will be prefixed with the string `c-` so they\ + \ do not\n conflict with ports defined for the coordinator\ + \ services.\n\nThe names of target ports will be prefixed\ + \ with the string `c-` so that the ports\n that can be\ + \ referenced in this section will be only those defined\ + \ under\n .spec.pods.customContainers[].ports sections\ + \ were names are also prepended with the same\n prefix.\n\ + \n**Changing this field may require a restart.**\n\nSee:\ + \ https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#serviceport-v1-core\n" + items: + description: "A custom port that will be exposed by the\ + \ Postgres coordinator services.\n\nThe name of the\ + \ custom port will be prefixed with the string `c-`\ + \ so it does not\n conflict with ports defined for\ + \ the coordinator services.\n\nThe name of target port\ + \ will be prefixed with the string `c-` so that the\ + \ port\n that can be referenced in this section will\ + \ be only those defined under\n .spec.pods.customContainers[].ports\ + \ sections were names are also prepended with the same\n\ + \ prefix.\n \nSee: 
https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#serviceport-v1-core\n" + properties: + appProtocol: + description: "The application protocol for this port.\ + \ This is used as a hint for implementations to\ + \ offer richer behavior for protocols that they\ + \ understand. This field follows standard Kubernetes\ + \ label syntax. Valid values are either:\n\n* Un-prefixed\ + \ protocol names - reserved for IANA standard service\ + \ names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\ + \n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c'\ + \ - HTTP/2 prior knowledge over cleartext as described\ + \ in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n\ + \ * 'kubernetes.io/ws' - WebSocket over cleartext\ + \ as described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \ * 'kubernetes.io/wss' - WebSocket over TLS as\ + \ described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \n* Other protocols should use implementation-defined\ + \ prefixed names such as mycompany.com/my-custom-protocol." + type: string + name: + description: The name of this port within the service. + This must be a DNS_LABEL. All ports within a ServiceSpec + must have unique names. When considering the endpoints + for a Service, this must match the 'name' field + in the EndpointPort. Optional if only one ServicePort + is defined on this service. + type: string + nodePort: + description: 'The port on each node on which this + service is exposed when type is NodePort or LoadBalancer. Usually + assigned by the system. If a value is specified, + in-range, and not in use it will be used, otherwise + the operation will fail. If not specified, a port + will be allocated if this Service requires one. If + this field is specified when creating a Service + which does not need it, creation will fail. This + field will be wiped when updating a Service to no + longer need it (e.g. changing type from NodePort + to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by this + service. + format: int32 + type: integer + protocol: + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + description: IntOrString is a type that can hold an + int32 or a string. When used in JSON or YAML marshalling + and unmarshalling, it produces or consumes the inner + type. This allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + shards: + type: object + description: 'Configuration for the shards services + + ' + properties: + primaries: + type: object + description: 'Configure the shards service to any primary + in the shards with the name as the SGShardedCluster plus + the `-shards` suffix. + + + It provides a stable connection (regardless of primary + failures or switchovers) to read-write Postgres servers + of any shard cluster. Read-write servers are load-balanced + via this service. + + + See also https://kubernetes.io/docs/concepts/services-networking/service/ + + ' + properties: + enabled: + type: boolean + description: Specify if the service should be created + or not. + type: + type: string + enum: + - ClusterIP + - LoadBalancer + - NodePort + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. 
Valid + + options are ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates + + a cluster-internal IP address for load-balancing to + endpoints. + + "NodePort" builds on ClusterIP and allocates a port + on every node. + + "LoadBalancer" builds on NodePort and creates + + an external load-balancer (if supported in the current + cloud). + + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + + ' + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if + NodePorts will be automatically allocated for services + with type LoadBalancer. Default is "true". It may + be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific + NodePorts (by specifying a value), those requests + will be respected, regardless of this field. This + field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any + other type. + type: boolean + externalIPs: + description: 'externalIPs is a list of IP addresses + for which nodes in the cluster will also accept traffic + for this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes + system. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#allocateloadbalancernodeports-v1-core' + items: + type: string + type: array + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes + distribute service traffic they receive on one of + the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", + the proxy will configure the service in a way that + assumes that external load balancers will take care + of balancing the service traffic between nodes, and + so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the + client source IP. (Traffic mistakenly sent to a node + with no endpoints will be dropped.) The default value, + "Cluster", uses the standard behavior of routing to + all endpoints evenly (possibly modified by topology + and other features). Note that traffic sent to an + External IP or LoadBalancer IP from within the cluster + will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to + take traffic policy into account when picking a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is + set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, + a value will be automatically allocated. External + systems (e.g. load-balancers) can use this port to + determine if a given node holds endpoints for this + service or not. If this field is specified when creating + a Service which does not need it, creation will fail. + This field will be wiped when updating a Service to + no longer need it (e.g. changing type). This field + cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes + distribute service traffic they receive on the ClusterIP. 
+ If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the + same node as the pod, dropping the traffic if there + are no local endpoints. The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + type: string + ipFamilies: + description: 'IPFamilies is a list of IP families (e.g. + IPv4, IPv6) assigned to this service. This field is + usually assigned automatically based on cluster configuration + and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the + cluster, and ipFamilyPolicy allows it, it will be + used; otherwise creation of the service will fail. + This field is conditionally mutable: it allows for + adding or removing a secondary IP family, but it does + not allow changing the primary IP family of the Service. + Valid values are "IPv4" and "IPv6". This field only + applies to Services of types ClusterIP, NodePort, + and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to + type ExternalName. + + + This field may hold a maximum of two entries (dual-stack + families, in either order). These families must correspond + to the values of the clusterIPs field, if specified. + Both clusterIPs and ipFamilies are governed by the + ipFamilyPolicy field.' + items: + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is + no value provided, then this field will be set to + SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters), or "RequireDualStack" (two + IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend + on the value of this field. This field will be wiped + when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If + specified, the value of this field must be a label-style + identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are + reserved for end-users. This field can only be set + when the Service type is 'LoadBalancer'. If not set, + the default load balancer implementation is used, + today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any + default load balancer implementation (e.g. cloud providers) + should ignore Services that set this field. This field + can only be set when creating or updating a Service + to type 'LoadBalancer'. Once set, it can not be changed. + This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load + balancer is created. This field will be ignored if + the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies + across implementations. 
Using it is non-portable and + it may not support dual-stack. Users are encouraged + to use implementation-specific annotations when available.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified + client IPs. This field will be ignored if the cloud-provider + does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that + any agent which deals with endpoints for this Service + should disregard any indications of ready/not-ready. + The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV + DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints + and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" + even if the Pods themselves are not. Agents which + consume only Kubernetes generated endpoints through + the Endpoints or EndpointSlice resources can safely + assume this behavior. + type: boolean + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to + maintain session affinity. Enable client IP based + session affinity. Must be ClientIP or None. Defaults + to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: 'SessionAffinityConfig represents the configurations + of session affinity. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#sessionaffinityconfig-v1-core' + properties: + clientIP: + description: ClientIPConfig represents the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The + value must be >0 && <=86400(for 1 day) if + ServiceAffinity == "ClientIP". Default value + is 10800(for 3 hours). 
+ format: int32 + type: integer + type: object + type: object + nodePorts: + type: object + description: nodePorts is a list of ports for exposing + a cluster services to the outside world + properties: + pgport: + type: integer + description: the node port that will be exposed + to connect to Postgres instance + replicationport: + type: integer + description: the node port that will be exposed + to connect to Postgres instance for replication + purpose + customPorts: + type: array + description: "The list of custom ports that will be exposed\ + \ by the shards services.\n\nThe names of custom ports\ + \ will be prefixed with the string `c-` so they do not\n\ + \ conflict with ports defined for the shards services.\n\ + \nThe names of target ports will be prefixed with the\ + \ string `c-` so that the ports\n that can be referenced\ + \ in this section will be only those defined under\n \ + \ .spec.pods.customContainers[].ports sections were names\ + \ are also prepended with the same\n prefix.\n\n**Changing\ + \ this field may require a restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#serviceport-v1-core\n" + items: + description: "A custom port that will be exposed by the\ + \ Postgres shards services.\n\nThe name of the custom\ + \ port will be prefixed with the string `c-` so it does\ + \ not\n conflict with ports defined for the shards\ + \ services.\n\nThe name of target port will be prefixed\ + \ with the string `c-` so that the port\n that can\ + \ be referenced in this section will be only those defined\ + \ under\n .spec.pods.customContainers[].ports sections\ + \ were names are also prepended with the same\n prefix.\n\ + \ \nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#serviceport-v1-core\n" + properties: + appProtocol: + description: "The application protocol for this port.\ + \ This is used as a hint for implementations to\ + \ offer richer behavior for protocols that they\ + \ understand. This field follows standard Kubernetes\ + \ label syntax. Valid values are either:\n\n* Un-prefixed\ + \ protocol names - reserved for IANA standard service\ + \ names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\ + \n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c'\ + \ - HTTP/2 prior knowledge over cleartext as described\ + \ in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n\ + \ * 'kubernetes.io/ws' - WebSocket over cleartext\ + \ as described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \ * 'kubernetes.io/wss' - WebSocket over TLS as\ + \ described in https://www.rfc-editor.org/rfc/rfc6455\n\ + \n* Other protocols should use implementation-defined\ + \ prefixed names such as mycompany.com/my-custom-protocol." + type: string + name: + description: The name of this port within the service. + This must be a DNS_LABEL. All ports within a ServiceSpec + must have unique names. When considering the endpoints + for a Service, this must match the 'name' field + in the EndpointPort. Optional if only one ServicePort + is defined on this service. + type: string + nodePort: + description: 'The port on each node on which this + service is exposed when type is NodePort or LoadBalancer. Usually + assigned by the system. If a value is specified, + in-range, and not in use it will be used, otherwise + the operation will fail. If not specified, a port + will be allocated if this Service requires one. 
If + this field is specified when creating a Service + which does not need it, creation will fail. This + field will be wiped when updating a Service to no + longer need it (e.g. changing type from NodePort + to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by this + service. + format: int32 + type: integer + protocol: + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + description: IntOrString is a type that can hold an + int32 or a string. When used in JSON or YAML marshalling + and unmarshalling, it produces or consumes the inner + type. This allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + configurations: + type: object + description: "Sharded cluster custom configurations.\n\n**Example:**\n\ + \n``` yaml\napiVersion: stackgres.io/v1alpha1\nkind: SGShardedCluster\n\ + metadata:\n name: stackgres\nspec:\n configurations:\n backups:\n\ + \ - sgObjectStorage: 'backupconf'\n```\n" + properties: + observability: + type: object + description: Allow to specify Observability configuration (related + to logs, metrics and traces) + properties: + disableMetrics: + type: boolean + description: 'If set to `true`, avoids creating the Prometheus + exporter sidecar. Recommended when there''s no intention + to use internal monitoring. + + + **Changing this field may require a restart.** + + ' + default: false + receiver: + type: string + description: Indicate the receiver in the configuration + for the collector scraper (if not specified, will default + to prometheus). + default: prometheus + prometheusAutobind: + type: boolean + description: If set to `true`, a PodMonitor is created for + each Prometheus instance as specified in the SGConfig.spec.collector.prometheusOperator.monitors + section. + default: false + backups: + type: array + description: 'List of sharded backups configurations for this + SGShardedCluster + + ' + items: + type: object + description: 'Sharded backup configuration for this SGShardedCluster + + ' + required: + - sgObjectStorage + properties: + compression: + type: string + description: 'Specifies the backup compression algorithm. + Possible options are: lz4, lzma, brotli. The default + method is `lz4`. LZ4 is the fastest method, but compression + ratio is the worst. LZMA is way slower, but it compresses + backups about 6 times better than LZ4. Brotli is a good + trade-off between speed and compression ratio, being + about 3 times better than LZ4. + + ' + enum: + - lz4 + - lzma + - brotli + cronSchedule: + type: string + description: 'Continuous Archiving backups are composed + of periodic *base backups* and all the WAL segments + produced in between those base backups for the coordinator + and each shard. This parameter specifies at what time + and with what frequency to start performing a new base + backup. + + + Use cron syntax (`m h dom mon dow`) for this parameter, + i.e., 5 values separated by spaces: + + * `m`: minute, 0 to 59. + + * `h`: hour, 0 to 23. + + * `dom`: day of month, 1 to 31 (recommended not to + set it higher than 28). + + * `mon`: month, 1 to 12. + + * `dow`: day of week, 0 to 7 (0 and 7 both represent + Sunday). 
+ + + Also ranges of values (`start-end`), the symbol `*` + (meaning `first-last`) or even `*/N`, where `N` is a + number, meaning ""every `N`, may be used. All times + are UTC. It is recommended to avoid 00:00 as base backup + time, to avoid overlapping with any other external operations + happening at this time. + + + If not set, full backups are never performed automatically. + + ' + performance: + type: object + description: 'Configuration that affects the backup network + and disk usage performance. + + ' + properties: + maxNetworkBandwidth: + type: integer + description: 'Maximum storage upload bandwidth used + when storing a backup. In bytes (per second). + + ' + maxDiskBandwidth: + type: integer + description: 'Maximum disk read I/O when performing + a backup. In bytes (per second). + + ' + uploadDiskConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to store the data. This parameter configures + the number of parallel streams to use to reading + from disk. By default, it''s set to 1. + + ' + uploadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to store the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to 16. + + ' + downloadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to read the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to the minimum between the number of file + to read and 10. + + ' + retention: + type: integer + minimum: 1 + description: 'When an automatic retention policy is defined + to delete old base backups, this parameter specifies + the number of base backups to keep, in a sliding window. + + + Consequently, the time range covered by backups is `periodicity*retention`, + where `periodicity` is the separation between backups + as specified by the `cronSchedule` property. + + + Default is 5. + + ' + sgObjectStorage: + type: string + description: 'Name of the [SGObjectStorage](https://stackgres.io/doc/latest/reference/crd/sgobjectstorage) + to use for the cluster. It defines the location in which + the the backups will be stored. + + ' + paths: + type: array + items: + type: string + description: "The paths were the backups are stored. If\ + \ not set this field is filled up by the operator.\n\ + \nWhen provided will indicate were the backups and WAL\ + \ files will be stored.\n\nThe first path indicate the\ + \ coordinator path and the other paths indicate the\ + \ shards paths\n\n> **WARNING**: Most users should leave\ + \ this field empty since having it manually set could\ + \ be dangerous. If the value is repeated due to re-creating\ + \ an SGShardedCluster or\n re-using the same value in\ + \ another SGShardedCluster and you may get a mixed WAL\ + \ history with unrecoverable backups.\n" + useVolumeSnapshot: + type: boolean + description: 'If specified SGBackup will use VolumeSnapshot + to create backups. + + + This functionality still require to store WAL files + in an SGObjectStorage but could result in much faster + backups and restore of those backups. + + + See also https://kubernetes.io/docs/concepts/storage/volume-snapshots/ + + ' + volumeSnapshotClass: + type: string + description: 'The name of the VolumeSnaphostClass to use + to create the VolumeSnapshot for backups. 
+
+
+                              See also https://kubernetes.io/docs/concepts/storage/volume-snapshots/
+
+                              '
+                          fastVolumeSnapshot:
+                            type: boolean
+                            description: 'If specified, SGBackup will create a backup
+                              forcing a fast start (by setting parameter `fast` to `true`
+                              when calling `pg_backup_start`) that will reduce the time
+                              the backups may take at the expense of more IO usage.
+
+
+                              See also https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-LOWLEVEL-BASE-BACKUP
+
+                              '
+                          timeout:
+                            type: integer
+                            description: 'Allow to set a timeout for the backup creation.
+
+
+                              If not set it will be disabled and the backup operation
+                              will continue until the backup completes or fails. If set
+                              to 0 it is the same as not being set.
+
+
+                              Make sure to set a reasonably high value in order to allow
+                              for any unexpected delays during backup creation (network
+                              low bandwidth, disk low throughput and so forth).
+
+                              '
+                          reconciliationTimeout:
+                            type: integer
+                            default: 300
+                            description: "Allow to set a timeout for the reconciliation process that takes place after the backup.\n\nIf not set it defaults to 300 (5 minutes). If set to 0 it will disable the timeout.\n\nFailure of reconciliation will not make the backup fail and will be re-tried the next time an SGBackup or scheduled backup Job takes place.\n"
+                          retainWalsForUnmanagedLifecycle:
+                            type: boolean
+                            description: 'If specified, WAL created after any unmanaged
+                              lifecycle backups will be retained.
+
+                              '
+                    credentials:
+                      type: object
+                      description: 'Allow to specify custom credentials for Postgres
+                        users and the Patroni REST API
+
+
+                        **Changing this field may require a restart.**
+
+                        '
+                      properties:
+                        patroni:
+                          type: object
+                          description: 'Kubernetes [SecretKeySelectors](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)
+                            that contains the credentials for the patroni REST API.
+
+
+                            **Changing this field may require a restart.**
+
+                            '
+                          properties:
+                            restApiPassword:
+                              type: object
+                              description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)
+                                that contains the password for the patroni REST API.
+
+                                '
+                              required:
+                                - name
+                                - key
+                              properties:
+                                name:
+                                  type: string
+                                  description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names).
+                                key:
+                                  type: string
+                                  description: The key of the secret to select from.
+                                    Must be a valid secret key.
+ users: + type: object + description: "Kubernetes [SecretKeySelectors](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core)\ + \ that contains the credentials of the users.\n\n**Changing\ + \ this field may require a manual modification of the\ + \ database users to reflect the new values specified.**\n\ + \nIn particular you may have to create those users if\ + \ username is changed or alter password if it is changed.\ + \ Here are the SQL commands to perform such operation\ + \ (replace\n default usernames with the new ones and\ + \ `***` with their respective passwords):\n\n* Superuser\ + \ username changed:\n```\nCREATE ROLE postgres;\n```\n\ + * Superuser password changed:\n```\nALTER ROLE postgres\ + \ WITH SUPERUSER INHERIT CREATEROLE CREATEDB LOGIN REPLICATION\ + \ BYPASSRLS PASSWORD '***';\n```\n* Replication username\ + \ changed:\n```\nCREATE ROLE replicator;\n```\n* Replication\ + \ password changed:\n```\nALTER ROLE replicator WITH NOSUPERUSER\ + \ INHERIT NOCREATEROLE NOCREATEDB LOGIN REPLICATION NOBYPASSRLS\ + \ PASSWORD '***';\n```\n* Authenticator username changed:\n\ + ```\nCREATE ROLE authenticator;\n```\n* Authenticator\ + \ password changed:\n```\nALTER ROLE authenticator WITH\ + \ SUPERUSER INHERIT NOCREATEROLE NOCREATEDB LOGIN NOREPLICATION\ + \ NOBYPASSRLS PASSWORD '***';\n```\n\n**Changing this\ + \ field may require a restart.**\n" + properties: + superuser: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the superuser (usually + the postgres user). + + ' + properties: + username: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + replication: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the replication user + used to replicate from the primary cluster and from + replicas of this cluster. + + ' + properties: + username: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). 
+ key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + authenticator: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the credentials of the authenticator + user used by pgbouncer to authenticate other users. + + ' + properties: + username: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select + from. Must be a valid secret key. + binding: + type: object + description: "This section allows to specify the properties\ + \ of [Service Binding spec for provisioned service](https://servicebinding.io/spec/core/1.0.0/#provisioned-service).\n\ + \ If not specified, then some default will be used.\n\nFor\ + \ more information see https://servicebinding.io/spec/core/1.0.0/\n" + properties: + provider: + type: string + description: It's the reference of custom provider name. + If not specified, then the default value will be `stackgres` + database: + type: string + description: Allow to specify the database name. If not + specified, then the default value is `postgres` + username: + type: string + description: Allow to specify the username. If not specified, + then the superuser username will be used. + password: + type: object + description: Allow to reference Secret that contains the + user's password. If not specified, then the superuser + password will be used. + properties: + name: + type: string + description: The name of the Secret + key: + type: string + description: The key of the Secret + metadata: + type: object + description: Metadata information from any cluster created resources. 
+                  properties:
+                    annotations:
+                      type: object
+                      description: "Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to be passed to resources created and managed by StackGres.\n\n**Example:**\n\n```yaml\napiVersion: stackgres.io/v1alpha1\nkind: SGShardedCluster\nmetadata:\n  name: stackgres\nspec:\n  metadata:\n    annotations:\n      clusterPods:\n        customAnnotations: customAnnotationValue\n      primaryService:\n        customAnnotations: customAnnotationValue\n      replicasService:\n        customAnnotations: customAnnotationValue\n```\n"
+                      properties:
+                        allResources:
+                          type: object
+                          description: Annotations to attach to any resource created
+                            or managed by StackGres.
+                          additionalProperties:
+                            type: string
+                        clusterPods:
+                          type: object
+                          description: Annotations to attach to pods created or managed
+                            by StackGres.
+                          additionalProperties:
+                            type: string
+                        services:
+                          type: object
+                          description: Annotations to attach to all services created
+                            or managed by StackGres.
+                          additionalProperties:
+                            type: string
+                        primaryService:
+                          type: object
+                          description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+                            passed to the `-primary` service.
+                          additionalProperties:
+                            type: string
+                        replicasService:
+                          type: object
+                          description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+                            passed to the `-replicas` service.
+                          additionalProperties:
+                            type: string
+                    labels:
+                      type: object
+                      description: "Custom Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to be passed to resources created and managed by StackGres.\n\n**Example:**\n\n```yaml\napiVersion: stackgres.io/v1alpha1\nkind: SGShardedCluster\nmetadata:\n  name: stackgres\nspec:\n  metadata:\n    labels:\n      clusterPods:\n        customLabel: customLabelValue\n      services:\n        customLabel: customLabelValue\n```\n"
+                      properties:
+                        clusterPods:
+                          type: object
+                          description: Labels to attach to Pods created or managed
+                            by StackGres.
+                          additionalProperties:
+                            type: string
+                        services:
+                          type: object
+                          description: Labels to attach to Services and Endpoints
+                            created or managed by StackGres.
+                          additionalProperties:
+                            type: string
+                coordinator:
+                  type: object
+                  description: 'The coordinator is a StackGres cluster responsible
+                    for coordinating data storage and access from the shards.
+
+                    '
+                  required:
+                    - instances
+                    - pods
+                  properties:
+                    instances:
+                      type: integer
+                      minimum: 0
+                      description: "Number of StackGres instances for the cluster. Each instance contains one Postgres server. Out of all of the Postgres servers, one is elected as the primary; the rest remain as read-only replicas.\n\nIf sharding type is `shardingsphere` then, instead of an SGCluster, a ComputeNode will be created.\n\nSee also https://shardingsphere.apache.org/oncloud/current/en/user-manual/cn-sn-operator/#computenode\n"
+                    autoscaling:
+                      type: object
+                      description: 'This section allows to configure vertical Pod
+                        autoscaling for the SGCluster''s Pods.
+
+
+                        Vertical Pod Autoscaling will use CPU and memory usage as
+                        the metric to control the upscale or downscale of the Pod
+                        requests and limits resources.
+                        Vertical Pod Autoscaling requires the [Vertical Pod Autoscaler
+                        operator](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler)
+                        to be installed in the Kubernetes cluster.
+ + ' + properties: + mode: + type: string + description: 'Allow to enable or disable any of horizontal + and vertical Pod autoscaling. + + + Possible values are: + + * `vertical`: only vertical Pod autoscaling will be enabled + (default) + + * `none`: all autoscaling will be disabled + + ' + enum: + - vertical + - none + default: vertical + minAllowed: + type: object + description: 'Allow to define the lower bound for Pod resources + of patroni, pgbouncer and envoy containers + + ' + properties: + patroni: + type: object + description: 'Allow to define the lower bound for Pod + resources of patroni container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the patroni + container + memory: + type: string + description: The minimum allowed memory for the + patroni container + pgbouncer: + type: object + description: 'Allow to define the lower bound for Pod + resources of pgbouncer container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the pgbouncer + container + memory: + type: string + description: The minimum allowed memory for the + pgbouncer container + envoy: + type: object + description: 'Allow to define the lower bound for Pod + resources of envoy container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the envoy + container + memory: + type: string + description: The minimum allowed memory for the + envoy container + maxAllowed: + type: object + description: 'Allow to define the higher bound for Pod resources + of patroni, pgbouncer and envoy containers + + ' + properties: + patroni: + type: object + description: 'Allow to define the higher bound for Pod + resources of patroni container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the patroni + container + memory: + type: string + description: The maximum allowed memory for the + patroni container + pgbouncer: + type: object + description: 'Allow to define the higher bound for Pod + resources of pgbouncer container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the pgbouncer + container + memory: + type: string + description: The maximum allowed memory for the + pgbouncer container + envoy: + type: object + description: 'Allow to define the higher bound for Pod + resources of envoy container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the envoy + container + memory: + type: string + description: The maximum allowed memory for the + envoy container + horizontal: + type: object + description: 'Section to configure horizontal Pod autoscaling + aspects. + + ' + properties: + eplicasConnectionsUsageTarget: + type: string + description: 'The target value for replicas connections + used in order to trigger the upscale of replica instances. + + ' + default: '0.8' + replicasConnectionsUsageMetricType: + type: string + description: 'The metric type for connections used metric. + See https://keda.sh/docs/latest/concepts/scaling-deployments/#triggers + + ' + default: AverageValue + cooldownPeriod: + type: integer + description: 'The period in seconds before the downscale + of replica instances can be triggered. + + ' + default: 300 + pollingInterval: + type: integer + description: 'The interval in seconds to check if the + scaleup or scaledown have to be triggered. + + ' + default: 30 + vertical: + type: object + description: 'Section to configure vertical Pod autoscaling + aspects. 
+ + ' + properties: + recommender: + type: string + description: 'Recommender responsible for generating + recommendation for vertical Pod autoscaling. If not + specified the default one will be used. + + ' + sgInstanceProfile: + type: string + description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/reference/crd/sginstanceprofile/). + + + A SGInstanceProfile defines CPU and memory limits. Must exist + before creating a cluster. + + + When no profile is set, a default (1 core, 2 GiB RAM) one + is used. + + + **Changing this field may require a restart.** + + ' + managedSql: + type: object + description: "This section allows to reference SQL scripts that\ + \ will be applied to the cluster live.\n\nIf sharding type\ + \ is `shardingsphere` then this section will be applied to\ + \ the first cluster shard.\n In this case the database postgres\ + \ will also provide a foreign server called `shardingsphere`\ + \ and\n the superuser user mappings that will allow to run\ + \ DistQL queries using command like the following:\n\n```\n\ + SELECT * FROM dblink('shardingsphere', 'SHOW STORAGE UNITS')\n\ + \ AS _(name text, type text, host text, port int, db text,\n\ + \ connection_timeout_milliseconds int, idle_timeout_milliseconds\ + \ int,\n max_lifetime_milliseconds int, max_pool_size int,\ + \ min_pool_size int,\n read_only boolean, other_attributes\ + \ text);\n```\n\nSee https://shardingsphere.apache.org/document/current/en/user-manual/shardingsphere-proxy/distsql\n" + properties: + continueOnSGScriptError: + type: boolean + description: If true, when any entry of any `SGScript` fail + will not prevent subsequent `SGScript` from being executed. + By default is `false`. + scripts: + type: array + description: 'A list of script references that will be executed + in sequence. + + ' + items: + type: object + description: "A script reference. Each version of each\ + \ entry of the script referenced will be executed exactly\ + \ once following the sequence defined\n in the referenced\ + \ script and skipping any script entry that have already\ + \ been executed.\n" + properties: + id: + type: integer + description: The id is immutable and must be unique + across all the `SGScript` entries. It is replaced + by the operator and is used to identify the `SGScript` + entry. + sgScript: + type: string + description: A reference to an `SGScript` + pods: + type: object + description: 'Cluster pod''s configuration. + + + If sharding type is `shardingsphere` then this section will + apply to the ComputeNode. + + ' + required: + - persistentVolume + properties: + persistentVolume: + type: object + description: 'Pod''s persistent volume configuration. + + + If sharding type is `shardingsphere` then this section + is ignored. + + ' + required: + - size + properties: + size: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$ + description: 'Size of the PersistentVolume set for each + instance of the cluster. This size is specified either + in Mebibytes, Gibibytes or Tebibytes (multiples of + 2^20, 2^30 or 2^40, respectively). + + + If sharding type is `shardingsphere` then this field + is ignored. + + ' + storageClass: + type: string + description: 'Name of an existing StorageClass in the + Kubernetes cluster, used to create the PersistentVolumes + for the instances of the cluster. + + + If sharding type is `shardingsphere` then this field + is ignored. 
+
+                                '
+                        disableConnectionPooling:
+                          type: boolean
+                          description: 'If set to `true`, avoids creating a connection
+                            pooling (using [PgBouncer](https://www.pgbouncer.org/)) sidecar.
+
+
+                            If sharding type is `shardingsphere` then this field is ignored.
+
+
+                            **Changing this field may require a restart.**
+
+                            '
+                        disableMetricsExporter:
+                          type: boolean
+                          description: '**Deprecated** use instead .spec.configurations.observability.disableMetrics.
+
+                            '
+                        disablePostgresUtil:
+                          type: boolean
+                          description: 'If set to `true`, avoids creating the `postgres-util`
+                            sidecar. This sidecar contains usual Postgres administration
+                            utilities *that are not present in the main (`patroni`) container*,
+                            like `psql`. Only disable it if you know what you are doing.
+
+
+                            If sharding type is `shardingsphere` then this field is ignored.
+
+
+                            **Changing this field may require a restart.**
+
+                            '
+                        disableEnvoy:
+                          type: boolean
+                          description: 'If set to `true`, avoids creating the `envoy` sidecar.
+                            This sidecar is used as the edge proxy for the cluster''s Pods,
+                            providing extra metrics to the monitoring layer.
+
+
+                            **Changing this field may require a restart.**
+
+                            '
+                        resources:
+                          type: object
+                          description: Pod custom resources configuration.
+                          properties:
+                            enableClusterLimitsRequirements:
+                              type: boolean
+                              description: 'When enabled, resource limits for containers
+                                other than the patroni container will be set just like
+                                for the patroni container, as specified in the SGInstanceProfile.
+
+
+                                **Changing this field may require a restart.**
+
+                                '
+                            disableResourcesRequestsSplitFromTotal:
+                              type: boolean
+                              description: "When set to `true` the resources requests values in fields `SGInstanceProfile.spec.requests.cpu` and `SGInstanceProfile.spec.requests.memory` will represent the resources requests of the patroni container, and the total resources requests will be calculated by adding the resources requests of all the containers (including the patroni container).\n\n**Changing this field may require a restart.**\n"
+                            failWhenTotalIsHigher:
+                              type: boolean
+                              description: "When set to `true` the reconciliation of the cluster will fail if `disableResourcesRequestsSplitFromTotal` is not set or set to `false` and the sum of the CPU or memory of all the containers except patroni is equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu` or `SGInstanceProfile.spec.requests.memory`.\n\nWhen `false` (the default) and `disableResourcesRequestsSplitFromTotal` is not set or set to `false` and the sum of the CPU or memory of all the containers except patroni is equal to or higher than the total specified in `SGInstanceProfile.spec.requests.cpu` or `SGInstanceProfile.spec.requests.memory`, then the patroni container resources will be set to 0.\n"
+                        scheduling:
+                          type: object
+                          description: 'Pod custom scheduling, affinity and topology
+                            spread constraints configuration.
+
+
+                            **Changing this field may require a restart.**
+
+                            '
+                          properties:
+                            nodeSelector:
+                              type: object
+                              additionalProperties:
+                                type: string
+                              description: 'NodeSelector is a selector which must be true
+                                for the pod to fit on a node. Selector which must match
+                                a node''s labels for the pod to be scheduled on that node.
+                                More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+
+                                '
+                            tolerations:
+                              description: 'If specified, the pod''s tolerations.
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core' + items: + description: The pod this Toleration is attached to + tolerates any taint that matches the triple + using the matching operator . + properties: + effect: + description: Effect indicates the taint effect + to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; + this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and + Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the + period of time the toleration (which must be + of effect NoExecute, otherwise this field is + ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever + (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value + should be empty, otherwise just a regular string. + type: string + type: object + type: array + nodeAffinity: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node matches the corresponding + matchExpressions; the node(s) with the highest + sum are the most preferred. + items: + description: An empty preferred scheduling term + matches all objects with implicit weight 0 (i.e. + it's a no-op). A null preferred scheduling term + matches no objects (i.e. is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. 
If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the union + of the results of one or more label queries over + a set of nodes; that is, it represents the OR + of the selectors represented by the node selector + terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. 
+ type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. + "system-node-critical" and "system-cluster-critical" + are two special keywords which indicate the highest + priorities with the former being the highest priority. + Any other name must be defined by creating a PriorityClass + object with that name. If not specified, the pod priority + will be default or zero if there is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. 
If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. If + the affinity requirements specified by this field + cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may + or may not try to eventually evict the pod from + its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". 
The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter + pod anti affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. 
A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. If + the anti-affinity requirements specified by this + field cease to be met at some point during pod + execution (e.g. due to a pod label update), the + system may or may not try to eventually evict + the pod from its node. When there are multiple + elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all + terms must be satisfied. + items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. 
The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + topologySpreadConstraints: + description: 'TopologySpreadConstraints describes how + a group of pods ought to spread across topology domains. + Scheduler will schedule pods in a way which abides + by the constraints. All topologySpreadConstraints + are ANDed. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#topologyspreadconstraint-v1-core' + items: + description: TopologySpreadConstraint specifies how + to spread matching pods among the given topology. + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: 'MatchLabelKeys is a set of pod label + keys to select the pods over which spreading + will be calculated. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are ANDed with labelSelector to select + the group of existing pods over which spreading + will be calculated for the incoming pod. The + same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. MatchLabelKeys cannot be + set when LabelSelector isn''t set. Keys that + don''t exist in the incoming pod labels will + be ignored. A null or empty list means only + match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default).' 
+ items: + type: string + type: array + maxSkew: + description: 'MaxSkew describes the degree to + which pods may be unevenly distributed. When + `whenUnsatisfiable=DoNotSchedule`, it is the + maximum permitted difference between the number + of matching pods in the target topology and + the global minimum. The global minimum is the + minimum number of matching pods in an eligible + domain or zero if the number of eligible domains + is less than MinDomains. For example, in a 3-zone + cluster, MaxSkew is set to 1, and pods with + the same labelSelector spread as 2/2/1: In this + case, the global minimum is 1. | zone1 | zone2 + | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies + that satisfy it. It''s a required field. Default + value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: 'MinDomains indicates a minimum number + of eligible domains. When the number of eligible + domains with matching topology keys is less + than minDomains, Pod Topology Spread treats + "global minimum" as 0, and then the calculation + of Skew is performed. And when the number of + eligible domains with matching topology keys + equals or greater than minDomains, this value + has no effect on scheduling. As a result, when + the number of eligible domains is less than + minDomains, scheduler won''t schedule more than + maxSkew Pods to those domains. If value is nil, + the constraint behaves as if MinDomains is equal + to 1. Valid values are integers greater than + 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is + set to 2, MinDomains is set to 5 and pods with + the same labelSelector spread as 2/2/2: | zone1 + | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), + so "global minimum" is treated as 0. In this + situation, new pod with the same labelSelector + cannot be scheduled, because computed skew will + be 3(3 - 0) if new Pod is scheduled to any of + the three zones, it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread + feature gate to be enabled (enabled by default).' + format: int32 + type: integer + nodeAffinityPolicy: + description: 'NodeAffinityPolicy indicates how + we will treat Pod''s nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options + are: - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: + nodeAffinity/nodeSelector are ignored. All nodes + are included in the calculations. + + + If this value is nil, the behavior is equivalent + to the Honor policy. This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + type: string + nodeTaintsPolicy: + description: 'NodeTaintsPolicy indicates how we + will treat node taints when calculating pod + topology spread skew. Options are: - Honor: + nodes without taints, along with tainted nodes + for which the incoming pod has a toleration, + are included. - Ignore: node taints are ignored. + All nodes are included. + + + If this value is nil, the behavior is equivalent + to the Ignore policy. 
This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + type: string + topologyKey: + description: TopologyKey is the key of node labels. + Nodes that have a label with this key and identical + values are considered to be in the same topology. + We consider each <key, value> as a "bucket", + and try to put balanced number of pods into + each bucket. We define a domain as a particular + instance of a topology. Also, we define an eligible + domain as a domain whose nodes meet the requirements + of nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, + if TopologyKey is "topology.kubernetes.io/zone", + each zone is a domain of that topology. It's + a required field. + type: string + whenUnsatisfiable: + description: "WhenUnsatisfiable indicates how\ + \ to deal with a pod if it doesn't satisfy the\ + \ spread constraint. - DoNotSchedule (default)\ + \ tells the scheduler not to schedule it. -\ + \ ScheduleAnyway tells the scheduler to schedule\ + \ the pod in any location,\n but giving higher\ + \ precedence to topologies that would help reduce\ + \ the\n skew.\nA constraint is considered \"\ + Unsatisfiable\" for an incoming pod if and only\ + \ if every possible node assignment for that\ + \ pod would violate \"MaxSkew\" on some topology.\ + \ For example, in a 3-zone cluster, MaxSkew\ + \ is set to 1, and pods with the same labelSelector\ + \ spread as 3/1/1: | zone1 | zone2 | zone3 |\ + \ | P P P | P | P | If WhenUnsatisfiable\ + \ is set to DoNotSchedule, incoming pod can\ + \ only be scheduled to zone2(zone3) to become\ + \ 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3)\ + \ satisfies MaxSkew(1). In other words, the\ + \ cluster can still be imbalanced, but scheduler\ + \ won't make it *more* imbalanced. It's a required\ + \ field." + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + backup: + type: object + description: Backup Pod custom scheduling and affinity + configuration. + properties: + nodeSelector: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels.
+ items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the + union of the results of one or more label + queries over a set of nodes; that is, it represents + the OR of the selectors represented by the + node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. 
+ This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + tolerations: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the + union of the results of one or more label + queries over a set of nodes; that is, it represents + the OR of the selectors represented by the + node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. 
If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + nodeAffinity: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. 
+ If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the + union of the results of one or more label + queries over a set of nodes; that is, it represents + the OR of the selectors represented by the + node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. + "system-node-critical" and "system-cluster-critical" + are two special keywords which indicate the highest + priorities with the former being the highest priority. + Any other name must be defined by creating a PriorityClass + object with that name. 
If not specified, the pod + priority will be default or zero if there is no + default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod + affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest + sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) with, + where co-located is defined as running + on a node whose value of the label with + key <topologyKey> matches that of any + node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label selector is a + label query over a set of resources. + The result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set + of pod label keys to select which + pods will be taken into consideration.
+ The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with + `LabelSelector` as `key in (value)` + to select the group of existing + pods which pods will be taken into + consideration for the incoming pod's + pod (anti) affinity. Keys that don't + exist in the incoming pod labels + will be ignored. The default value + is empty. The same key is forbidden + to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is + a set of pod label keys to select + which pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with + `LabelSelector` as `key notin (value)` + to select the group of existing + pods which pods will be taken into + consideration for the incoming pod's + pod (anti) affinity. Keys that don't + exist in the incoming pod labels + will be ignored. The default value + is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a + label query over a set of resources. + The result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to a pod label update), + the system may or may not try to eventually + evict the pod from its node. When there are + multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key <topologyKey> matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration.
The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter + pod anti affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity + expressions specified by this field, but it + may choose a node that violates one or more + of the expressions. The node that is most + preferred is the one with the greatest sum + of weights, i.e. for each node that meets + all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most + preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) with, + where co-located is defined as running + on a node whose value of the label with + key <topologyKey> matches that of any + node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label selector is a + label query over a set of resources. + The result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value".
The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set + of pod label keys to select which + pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with + `LabelSelector` as `key in (value)` + to select the group of existing + pods which pods will be taken into + consideration for the incoming pod's + pod (anti) affinity. Keys that don't + exist in the incoming pod labels + will be ignored. The default value + is empty. The same key is forbidden + to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is + a set of pod label keys to select + which pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with + `LabelSelector` as `key notin (value)` + to select the group of existing + pods which pods will be taken into + consideration for the incoming pod's + pod (anti) affinity. Keys that don't + exist in the incoming pod labels + will be ignored. The default value + is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a + label query over a set of resources. + The result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. 
null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met at scheduling + time, the pod will not be scheduled onto the + node. If the anti-affinity requirements specified + by this field cease to be met at some point + during pod execution (e.g. due to a pod label + update), the system may or may not try to + eventually evict the pod from its node. When + there are multiple elements, the lists of + nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key <topologyKey> matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration.
The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + managementPolicy: + type: string + description: "managementPolicy controls how pods are created\ + \ during initial scale up, when replacing pods\n on nodes,\ + \ or when scaling down. The default policy is `OrderedReady`,\ + \ where pods are created\n in increasing order (pod-0,\ + \ then pod-1, etc) and the controller will wait until\ + \ each pod is\n ready before continuing. When scaling\ + \ down, the pods are removed in the opposite order.\n\ + \ The alternative policy is `Parallel` which will create\ + \ pods in parallel to match the desired\n scale without\ + \ waiting, and on scale down will delete all pods at once.\n\ + \nIf sharding type is `shardingsphere` then this field\ + \ is ignored.\n" + customVolumes: + type: array + description: "A list of custom volumes that may be used\ + \ along with any container defined in\n customInitContainers\ + \ or customContainers sections for the coordinator.\n\n\ + The name used in this section will be prefixed with the\ + \ string `c-` so that when\n referencing them in the\ + \ customInitContainers or customContainers sections the\ + \ name used\n have to be prepended with the same prefix.\n\ + \nOnly the following volume types are allowed: configMap,\ + \ downwardAPI, emptyDir,\n gitRepo, glusterfs, hostPath,\ + \ nfs, projected and secret\n\n**Changing this field may\ + \ require a restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core\n" + items: + type: object + description: "A custom volume that may be used along with\ + \ any container defined in\n customInitContainers or\ + \ customContainers sections.\n\nThe name used in this\ + \ section will be prefixed with the string `c-` so that\ + \ when\n referencing them in the customInitContainers\ + \ or customContainers sections the name used\n have\ + \ to be prepended with the same prefix.\n\nOnly the\ + \ following volume types are allowed: configMap, downwardAPI,\ + \ emptyDir,\n gitRepo, glusterfs, hostPath, nfs, projected\ + \ and secret\n\n**Changing this field may require a\ + \ restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core\n" + properties: + name: + description: 'name of the custom volume. The name + will be implicitly prefixed with `c-` to avoid clashing + with internal operator volume names. Must be a DNS_LABEL + and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + ' + type: string + configMap: + description: 'Adapts a ConfigMap into a volume. + + + The contents of the target ConfigMap''s Data field + will be presented in a volume as files using the + keys in the Data field as the file names, unless + the items element is populated with specific mappings + of keys to paths. ConfigMap volumes support ownership + management and SELinux relabeling. 
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#configmapvolumesource-v1-core' + properties: + defaultMode: + description: 'defaultMode is optional: mode bits + used to set permissions on created files by + default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path + are not affected by this setting. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: 'DownwardAPIVolumeSource represents a + volume containing downward API info. Downward API + volumes support ownership management and SELinux + relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#downwardapivolumesource-v1-core' + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default. Must be a Optional: mode bits + used to set permissions on created files by + default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path + are not affected by this setting. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: ResourceFieldSelector represents + container resources (cpu, memory) and + their output format + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to\ + \ String() and AsInt64() accessors.\n\ + \nThe serialization format is:\n\n\ + ``` ::= \n\ + \n\t(Note that may be empty,\ + \ from the \"\" case in .)\n\ + \n ::= 0 | 1 | ...\ + \ | 9 ::= \ + \ | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki | Mi |\ + \ Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" |\ + \ k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I\ + \ didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo\ + \ matter which of the three exponent\ + \ forms is used, no quantity may represent\ + \ a number greater than 2^63-1 in\ + \ magnitude, nor may it have more\ + \ than 3 decimal places. Numbers larger\ + \ or more precise will be capped or\ + \ rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended\ + \ in the future if we require larger\ + \ or smaller quantities.\n\nWhen a\ + \ Quantity is parsed from a string,\ + \ it will remember the type of suffix\ + \ it had, and will use the same type\ + \ again when it is serialized.\n\n\ + Before serializing, Quantity will\ + \ be put in \"canonical form\". 
This\ + \ means that Exponent/suffix will\ + \ be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa)\ + \ such that:\n\n- No precision is\ + \ lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe\ + \ sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"\ + 1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented\ + \ by a floating point number. That\ + \ is the whole point of this exercise.\n\ + \nNon-canonical values will still\ + \ parse as long as they are well formed,\ + \ but will be re-emitted in their\ + \ canonical form. (So always use canonical\ + \ form, or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code\ + \ in the hopes that that will cause\ + \ implementors to also use a fixed\ + \ point implementation." + type: string + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'Represents an empty directory for a + pod. Empty directory volumes support ownership management + and SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#emptydirvolumesource-v1-core' + properties: + medium: + description: 'medium represents what type of storage + medium should back this directory. The default + is "" which means to use the node''s default + medium. Must be an empty string (default) or + Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::= \n\ + \n\t(Note that may be empty, from the\ + \ \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | . |\ + \ . | . ::=\ + \ \"+\" | \"-\" ::= \ + \ | ::= \ + \ | | \ + \ ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G\ + \ | T | P | E\n\n\t(Note that 1024 = 1Ki but\ + \ 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter which\ + \ of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1\ + \ in magnitude, nor may it have more than 3\ + \ decimal places. Numbers larger or more precise\ + \ will be capped or rounded up. (E.g.: 0.1m\ + \ will rounded up to 1m.) This may be extended\ + \ in the future if we require larger or smaller\ + \ quantities.\n\nWhen a Quantity is parsed from\ + \ a string, it will remember the type of suffix\ + \ it had, and will use the same type again when\ + \ it is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". 
This means\ + \ that Exponent/suffix will be adjusted up or\ + \ down (with a corresponding increase or decrease\ + \ in Mantissa) such that:\n\n- No precision\ + \ is lost - No fractional digits will be emitted\ + \ - The exponent (or suffix) is as large as\ + \ possible.\n\nThe sign will be omitted unless\ + \ the number is negative.\n\nExamples:\n\n-\ + \ 1.5 will be serialized as \"1500m\" - 1.5Gi\ + \ will be serialized as \"1536Mi\"\n\nNote that\ + \ the quantity will NEVER be internally represented\ + \ by a floating point number. That is the whole\ + \ point of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are well\ + \ formed, but will be re-emitted in their canonical\ + \ form. (So always use canonical form, or don't\ + \ diff.)\n\nThis format is intended to make\ + \ it difficult to use these numbers without\ + \ writing some sort of special handling code\ + \ in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + type: object + gitRepo: + description: 'Represents a volume that is populated + with the contents of a git repository. Git repo + volumes do not support ownership management. Git + repo volumes support SELinux relabeling. + + + DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into + an InitContainer that clones the repo using git, + then mount the EmptyDir into the Pod''s container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#gitrepovolumesource-v1-core' + properties: + directory: + description: directory is the target directory + name. Must not contain or start with '..'. If + '.' is supplied, the volume directory will be + the git repository. Otherwise, if specified, + the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Represents a Glusterfs mount that lasts + the lifetime of a pod. Glusterfs volumes do not + support ownership management or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#glusterfsvolumesource-v1-core' + properties: + endpoints: + description: 'endpoints is the endpoint name that + details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs + volume to be mounted with read-only permissions. + Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'Represents a host path mapped into a + pod. Host path volumes do not support ownership + management or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#hostpathvolumesource-v1-core' + properties: + path: + description: 'path of the directory on the host. + If the path is a symlink, it will follow the + link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults + to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + nfs: + description: 'Represents an NFS mount that lasts the + lifetime of a pod. NFS volumes do not support ownership + management or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nfsvolumesource-v1-core' + properties: + path: + description: 'path that is exported by the NFS + server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS + export to be mounted with read-only permissions. + Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address + of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - server + - path + type: object + projected: + description: 'Represents a projected volume source + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#projectedvolumesource-v1-core' + properties: + defaultMode: + description: defaultMode are the mode bits used + to set permissions on created files by default. + Must be an octal value between 0000 and 0777 + or a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires + decimal values for mode bits. Directories within + the path are not affected by this setting. This + might be in conflict with other options that + affect the file mode, like fsGroup, and the + result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundleProjection + describes how to select a set of ClusterTrustBundle + objects and project their contents into + the pod filesystem. + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The + result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector + requirement is a selector that + contains values, a key, and + an operator that relates the + key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In or + NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be + empty. This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. 
A single + {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator is + "In", and the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: If true, don't block pod + startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, + then the named ClusterTrustBundle + is allowed not to exist. If using + signerName, then the combination of + signerName and labelSelector is allowed + to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the + volume root to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles + that match this signer name. Mutually-exclusive + with name. The contents of all selected + ClusterTrustBundles will be unified + and deduplicated. + type: string + required: + - path + type: object + configMap: + description: 'Adapts a ConfigMap into a + projected volume. + + + The contents of the target ConfigMap''s + Data field will be presented in a projected + volume as files using the keys in the + Data field as the file names, unless the + items element is populated with specific + mappings of keys to paths. Note that this + is identical to a configmap volume source + without the default mode.' + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced ConfigMap will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will + be projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not + present in the ConfigMap, the volume + setup will error unless it is marked + optional. Paths must be relative and + may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key to + project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 + or a decimal value between 0 + and 511. YAML accepts both octal + and decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume + defaultMode will be used. This + might be in conflict with other + options that affect the file + mode, like fsGroup, and the + result can be other mode bits + set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the + key to. May not be an absolute + path. May not contain the path + element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be + defined + type: boolean + type: object + downwardAPI: + description: Represents downward API info + for projecting into a projected volume. + Note that this is identical to a downwardAPI + volume source without the default mode. 
+ properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile + represents information to create + the file containing the pod field + properties: + fieldRef: + description: ObjectFieldSelector + selects an APIVersioned field + of an object. + properties: + apiVersion: + description: Version of the + schema the FieldPath is + written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits + used to set permissions on this + file, must be an octal value + between 0000 and 0777 or a decimal + value between 0 and 511. YAML + accepts both octal and decimal + values, JSON requires decimal + values for mode bits. If not + specified, the volume defaultMode + will be used. This might be + in conflict with other options + that affect the file mode, like + fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. + Must be utf-8 encoded. The first + item of the relative path must + not start with ''..''' + type: string + resourceFieldRef: + description: ResourceFieldSelector + represents container resources + (cpu, memory) and their output + format + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + description: "Quantity is\ + \ a fixed-point representation\ + \ of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition\ + \ to String() and AsInt64()\ + \ accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\ + \n\t(Note that \ + \ may be empty, from the\ + \ \"\" case in .)\n\ + \n ::=\ + \ 0 | 1 | ... | 9 \ + \ ::= |\ + \ \ + \ ::= \ + \ | . |\ + \ . | .\ + \ ::=\ + \ \"+\" | \"-\" \ + \ ::= | \ + \ ::=\ + \ | \ + \ | \ + \ ::= Ki | Mi | Gi\ + \ | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See:\ + \ http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::=\ + \ m | \"\" | k | M | G |\ + \ T | P | E\n\n\t(Note that\ + \ 1024 = 1Ki but 1000 =\ + \ 1k; I didn't choose the\ + \ capitalization.)\n\n\ + \ ::= \"e\" \ + \ | \"E\" \ + \ ```\n\nNo matter which\ + \ of the three exponent\ + \ forms is used, no quantity\ + \ may represent a number\ + \ greater than 2^63-1 in\ + \ magnitude, nor may it\ + \ have more than 3 decimal\ + \ places. Numbers larger\ + \ or more precise will be\ + \ capped or rounded up.\ + \ (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be\ + \ extended in the future\ + \ if we require larger or\ + \ smaller quantities.\n\n\ + When a Quantity is parsed\ + \ from a string, it will\ + \ remember the type of suffix\ + \ it had, and will use the\ + \ same type again when it\ + \ is serialized.\n\nBefore\ + \ serializing, Quantity\ + \ will be put in \"canonical\ + \ form\". 
This means that\ + \ Exponent/suffix will be\ + \ adjusted up or down (with\ + \ a corresponding increase\ + \ or decrease in Mantissa)\ + \ such that:\n\n- No precision\ + \ is lost - No fractional\ + \ digits will be emitted\ + \ - The exponent (or suffix)\ + \ is as large as possible.\n\ + \nThe sign will be omitted\ + \ unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will\ + \ be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized\ + \ as \"1536Mi\"\n\nNote\ + \ that the quantity will\ + \ NEVER be internally represented\ + \ by a floating point number.\ + \ That is the whole point\ + \ of this exercise.\n\n\ + Non-canonical values will\ + \ still parse as long as\ + \ they are well formed,\ + \ but will be re-emitted\ + \ in their canonical form.\ + \ (So always use canonical\ + \ form, or don't diff.)\n\ + \nThis format is intended\ + \ to make it difficult to\ + \ use these numbers without\ + \ writing some sort of special\ + \ handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point\ + \ implementation." + type: string + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: 'Adapts a secret into a projected + volume. + + + The contents of the target Secret''s Data + field will be presented in a projected + volume as files using the keys in the + Data field as the file names. Note that + this is identical to a secret volume source + without the default mode.' + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced Secret will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will + be projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not + present in the Secret, the volume + setup will error unless it is marked + optional. Paths must be relative and + may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key to + project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 + or a decimal value between 0 + and 511. YAML accepts both octal + and decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume + defaultMode will be used. This + might be in conflict with other + options that affect the file + mode, like fsGroup, and the + result can be other mode bits + set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the + key to. May not be an absolute + path. May not contain the path + element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional field specify + whether the Secret or its key must + be defined + type: boolean + type: object + serviceAccountToken: + description: ServiceAccountTokenProjection + represents a projected service account + token volume. 
This projection can be used + to insert a service account token into + the pods runtime filesystem for use against + APIs (Kubernetes API Server or otherwise). + properties: + audience: + description: audience is the intended + audience of the token. A recipient + of a token must identify itself with + an identifier specified in the audience + of the token, and otherwise should + reject the token. The audience defaults + to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the + requested duration of validity of + the service account token. As the + token approaches expiration, the kubelet + volume plugin will proactively rotate + the service account token. The kubelet + will start trying to rotate the token + if the token is older than 80 percent + of its time to live or if the token + is older than 24 hours.Defaults to + 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative + to the mount point of the file to + project the token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + secret: + description: 'Adapts a Secret into a volume. + + + The contents of the target Secret''s Data field + will be presented in a volume as files using the + keys in the Data field as the file names. Secret + volumes support ownership management and SELinux + relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretvolumesource-v1-core' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits + used to set permissions on created files by + default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path + are not affected by this setting. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret + in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource references + the user''s PVC in the same namespace. This volume + finds the bound PV and mounts that volume for the + pod. A PersistentVolumeClaimVolumeSource is, essentially, + a wrapper around another type of volume that is + owned by someone else (the system). + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#persistentvolumeclaimvolumesource-v1-core' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this + volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly + setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + customInitContainers: + type: array + description: "A list of custom application init containers\ + \ that run within the shards cluster's Pods. The\n custom\ + \ init containers will run following the defined sequence\ + \ as the end of\n cluster's Pods init containers.\n\n\ + The name used in this section will be prefixed with the\ + \ string `c-` so that when\n referencing them in the\ + \ .spec.containers section of SGInstanceProfile the name\ + \ used\n have to be prepended with the same prefix.\n\ + \n**Changing this field may require a restart.**\n\nSee:\ + \ https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\n" + items: + type: object + description: "A custom application init container that\ + \ run within the cluster's Pods. The custom init\n containers\ + \ will run following the defined sequence as the end\ + \ of cluster's Pods init\n containers.\n\nThe name used\ + \ in this section will be prefixed with the string `c-`\ + \ so that when\n referencing them in the .spec.containers\ + \ section of SGInstanceProfile the name used\n have\ + \ to be prepended with the same prefix.\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\\\ + n\n\n**Changing this field may require a restart.**\n" + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the + reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". Escaped + references will never be expanded, regardless of + whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The container image''s ENTRYPOINT is used + if this is not provided. 
Variable references $(VAR_NAME) + are expanded using the container''s environment. + If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ + are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped + references will never be expanded, regardless of + whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set + in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined + environment variables in the container and + any service environment variables. If a variable + cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: EnvVarSource represents a source + for the value of an EnvVar. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: ResourceFieldSelector represents + container resources (cpu, memory) and + their output format + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to\ + \ String() and AsInt64() accessors.\n\ + \nThe serialization format is:\n\n\ + ``` ::= \n\ + \n\t(Note that may be empty,\ + \ from the \"\" case in .)\n\ + \n ::= 0 | 1 | ...\ + \ | 9 ::= \ + \ | \ + \ ::= | .\ + \ | . | . 
\ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki | Mi |\ + \ Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" |\ + \ k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I\ + \ didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo\ + \ matter which of the three exponent\ + \ forms is used, no quantity may represent\ + \ a number greater than 2^63-1 in\ + \ magnitude, nor may it have more\ + \ than 3 decimal places. Numbers larger\ + \ or more precise will be capped or\ + \ rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended\ + \ in the future if we require larger\ + \ or smaller quantities.\n\nWhen a\ + \ Quantity is parsed from a string,\ + \ it will remember the type of suffix\ + \ it had, and will use the same type\ + \ again when it is serialized.\n\n\ + Before serializing, Quantity will\ + \ be put in \"canonical form\". This\ + \ means that Exponent/suffix will\ + \ be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa)\ + \ such that:\n\n- No precision is\ + \ lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe\ + \ sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"\ + 1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented\ + \ by a floating point number. That\ + \ is the whole point of this exercise.\n\ + \nNon-canonical values will still\ + \ parse as long as they are well formed,\ + \ but will be re-emitted in their\ + \ canonical form. (So always use canonical\ + \ form, or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code\ + \ in the hopes that that will cause\ + \ implementors to also use a fixed\ + \ point implementation." + type: string + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + secretKeyRef: + description: SecretKeySelector selects a + key of a Secret. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container + is starting. When a key exists in multiple sources, + the value associated with the last source will take + precedence. Values defined by an Env with a duplicate + key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: 'ConfigMapEnvSource selects a ConfigMap + to populate the environment variables with. + + + The contents of the target ConfigMap''s Data + field will represent the key-value pairs as + environment variables.' + properties: + name: + description: 'Name of the referent. 
More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: 'SecretEnvSource selects a Secret + to populate the environment variables with. + + + The contents of the target Secret''s Data + field will represent the key-value pairs as + environment variables.' + properties: + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images + in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag + is specified, or IfNotPresent otherwise. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle describes actions that the + management system should take in response to container + lifecycle events. For the PostStart and PreStop + lifecycle handlers, management of the container + blocks until the action is complete, unless the + container process fails, in which case the handler + is aborted. + properties: + postStart: + description: LifecycleHandler defines a specific + action that should be taken in a lifecycle hook. + One and only one of the fields, except TCPSocket + must be specified. + properties: + exec: + description: ExecAction describes a "run in + container" action. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. 
When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" + action. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: LifecycleHandler defines a specific + action that should be taken in a lifecycle hook. + One and only one of the fields, except TCPSocket + must be specified. + properties: + exec: + description: ExecAction describes a "run in + container" action. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" + action. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a + DNS_LABEL. Each container in a pod must have a unique + name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. + Not specifying a port here DOES NOT prevent that + port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container + will be accessible from the network. Modifying this + array with strategic merge patch may corrupt the + data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: Number of port to expose on the + pod's IP address. This must be a valid port + number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the + host. If specified, this must be a valid port + number, 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. 
+ format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port + in a pod must have a unique name. Name for + the port that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, + TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: 'Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. + + + This is an alpha field and requires enabling + the DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set + for containers.' + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. 
It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::=\ + \ \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\ + \n ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" ::=\ + \ | \ + \ ::= | |\ + \ ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M |\ + \ G | T | P | E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter\ + \ which of the three exponent forms is used,\ + \ no quantity may represent a number greater\ + \ than 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers larger\ + \ or more precise will be capped or rounded\ + \ up. (E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we\ + \ require larger or smaller quantities.\n\n\ + When a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had,\ + \ and will use the same type again when it\ + \ is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\n\ + - No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will\ + \ be omitted unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be serialized as\ + \ \"1536Mi\"\n\nNote that the quantity will\ + \ NEVER be internally represented by a floating\ + \ point number. That is the whole point of\ + \ this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical\ + \ form. (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these numbers\ + \ without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::=\ + \ \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\ + \n ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" ::=\ + \ | \ + \ ::= | |\ + \ ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M |\ + \ G | T | P | E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter\ + \ which of the three exponent forms is used,\ + \ no quantity may represent a number greater\ + \ than 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers larger\ + \ or more precise will be capped or rounded\ + \ up. 
(E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we\ + \ require larger or smaller quantities.\n\n\ + When a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had,\ + \ and will use the same type again when it\ + \ is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\n\ + - No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will\ + \ be omitted unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be serialized as\ + \ \"1536Mi\"\n\nNote that the quantity will\ + \ NEVER be internally represented by a floating\ + \ point number. That is the whole point of\ + \ this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical\ + \ form. (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these numbers\ + \ without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string + securityContext: + description: SecurityContext holds security configuration + that will be applied to a container. Some fields + are present in both SecurityContext and PodSecurityContext. When + both are set, the values in SecurityContext take + precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the + container process. 
AllowPrivilegeEscalation + is true always when the container is: 1) run + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name is + windows.' + type: boolean + capabilities: + description: Adds and removes POSIX capabilities + from running containers. + properties: + add: + description: Added capabilities + items: + type: string + type: array + drop: + description: Removed capabilities + items: + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. + Processes in privileged containers are essentially + equivalent to root on the host. Defaults to + false. Note that this field cannot be set when + spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default + is DefaultProcMount which uses the container + runtime defaults for readonly paths and masked + paths. This requires the ProcMountType feature + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of + the container process. Uses runtime default + if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. Note that this field cannot be set + when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must + run as a non-root user. If true, the Kubelet + will validate the image at runtime to ensure + that it does not run as UID 0 (root) and fail + to start the container if it does. If unset + or false, no such validation will be performed. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of + the container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note that + this field cannot be set when spec.os.name is + windows. + format: int64 + type: integer + seLinuxOptions: + description: SELinuxOptions are the labels to + be applied to the container + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: SeccompProfile defines a pod/container's + seccomp profile settings. Only one profile source + may be set. + properties: + localhostProfile: + description: localhostProfile indicates a + profile defined in a file on the node should + be used. The profile must be preconfigured + on the node to work. Must be a descending + path, relative to the kubelet's configured + seccomp profile location. Must be set if + type is "Localhost". 
Must NOT be set for + any other type. + type: string + type: + description: 'type indicates which kind of + seccomp profile will be applied. Valid options + are: + + + Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile + should be used. Unconfined - no profile + should be applied.' + type: string + required: + - type + type: object + windowsOptions: + description: WindowsSecurityContextOptions contain + Windows-specific options and credentials. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. + Defaults to the user specified in image + metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. If + this is not set, reads from stdin in the container + will always result in EOF. Default is false. 
+ type: boolean + stdinOnce: + description: Whether the container runtime should + close the stdin channel after it has been opened + by a single attach. When stdin is true the stdin + stream will remain open across multiple attach sessions. + If stdinOnce is set to true, stdin is opened on + container start, is empty until the first client + attaches to stdin, and then remains open and accepts + data until the client disconnects, at which time + stdin is closed and remains closed until the container + is restarted. If this flag is false, a container + processes that reads from stdin will never receive + an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to + which the container''s termination message will + be written is mounted into the container''s filesystem. + Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message + should be populated. File will use the contents + of terminationMessagePath to populate the container + status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output + if the termination message file is empty and the + container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is + smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of + a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - name + - devicePath + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how + mounts are propagated from the host to container + and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. Behaves similarly to SubPath but + environment variable references $(VAR_NAME) + are expanded using the container's environment. 
+ Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + type: array + workingDir: + description: Container's working directory. If not + specified, the container runtime's default will + be used, which might be configured in the container + image. Cannot be updated. + type: string + customContainers: + type: array + description: "A list of custom application containers that\ + \ run within the coordinator cluster's Pods.\n\nThe name\ + \ used in this section will be prefixed with the string\ + \ `c-` so that when\n referencing them in the .spec.containers\ + \ section of SGInstanceProfile the name used\n have to\ + \ be prepended with the same prefix.\n \nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\n" + items: + type: object + description: "A custom application container that run\ + \ within the cluster's Pods. The custom\n containers\ + \ will run following the defined sequence as the end\ + \ of cluster's Pods\n containers.\n\nThe name used in\ + \ this section will be prefixed with the string `c-`\ + \ so that when\n referencing them in the .spec.containers\ + \ section of SGInstanceProfile the name used\n have\ + \ to be prepended with the same prefix.\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\\\ + n\n\n**Changing this field may require a restart.**\n" + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the + reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". Escaped + references will never be expanded, regardless of + whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The container image''s ENTRYPOINT is used + if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. + If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ + are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped + references will never be expanded, regardless of + whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set + in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined + environment variables in the container and + any service environment variables. 
If a variable + cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: EnvVarSource represents a source + for the value of an EnvVar. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: ResourceFieldSelector represents + container resources (cpu, memory) and + their output format + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to\ + \ String() and AsInt64() accessors.\n\ + \nThe serialization format is:\n\n\ + ``` ::= \n\ + \n\t(Note that may be empty,\ + \ from the \"\" case in .)\n\ + \n ::= 0 | 1 | ...\ + \ | 9 ::= \ + \ | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki | Mi |\ + \ Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" |\ + \ k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I\ + \ didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo\ + \ matter which of the three exponent\ + \ forms is used, no quantity may represent\ + \ a number greater than 2^63-1 in\ + \ magnitude, nor may it have more\ + \ than 3 decimal places. Numbers larger\ + \ or more precise will be capped or\ + \ rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended\ + \ in the future if we require larger\ + \ or smaller quantities.\n\nWhen a\ + \ Quantity is parsed from a string,\ + \ it will remember the type of suffix\ + \ it had, and will use the same type\ + \ again when it is serialized.\n\n\ + Before serializing, Quantity will\ + \ be put in \"canonical form\". This\ + \ means that Exponent/suffix will\ + \ be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa)\ + \ such that:\n\n- No precision is\ + \ lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe\ + \ sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"\ + 1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented\ + \ by a floating point number. 
That\ + \ is the whole point of this exercise.\n\ + \nNon-canonical values will still\ + \ parse as long as they are well formed,\ + \ but will be re-emitted in their\ + \ canonical form. (So always use canonical\ + \ form, or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code\ + \ in the hopes that that will cause\ + \ implementors to also use a fixed\ + \ point implementation." + type: string + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + secretKeyRef: + description: SecretKeySelector selects a + key of a Secret. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container + is starting. When a key exists in multiple sources, + the value associated with the last source will take + precedence. Values defined by an Env with a duplicate + key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: 'ConfigMapEnvSource selects a ConfigMap + to populate the environment variables with. + + + The contents of the target ConfigMap''s Data + field will represent the key-value pairs as + environment variables.' + properties: + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: 'SecretEnvSource selects a Secret + to populate the environment variables with. + + + The contents of the target Secret''s Data + field will represent the key-value pairs as + environment variables.' + properties: + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images + in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag + is specified, or IfNotPresent otherwise. Cannot + be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle describes actions that the + management system should take in response to container + lifecycle events. For the PostStart and PreStop + lifecycle handlers, management of the container + blocks until the action is complete, unless the + container process fails, in which case the handler + is aborted. + properties: + postStart: + description: LifecycleHandler defines a specific + action that should be taken in a lifecycle hook. + One and only one of the fields, except TCPSocket + must be specified. + properties: + exec: + description: ExecAction describes a "run in + container" action. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" + action. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: LifecycleHandler defines a specific + action that should be taken in a lifecycle hook. + One and only one of the fields, except TCPSocket + must be specified. 
+ properties: + exec: + description: ExecAction describes a "run in + container" action. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" + action. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. 
Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a + DNS_LABEL. Each container in a pod must have a unique + name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. + Not specifying a port here DOES NOT prevent that + port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container + will be accessible from the network. Modifying this + array with strategic merge patch may corrupt the + data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: Number of port to expose on the + pod's IP address. This must be a valid port + number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the + host. If specified, this must be a valid port + number, 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port + in a pod must have a unique name. Name for + the port that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, + TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). 
This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: 'Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. + + + This is an alpha field and requires enabling + the DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set + for containers.' + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::=\ + \ \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\ + \n ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" ::=\ + \ | \ + \ ::= | |\ + \ ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M |\ + \ G | T | P | E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter\ + \ which of the three exponent forms is used,\ + \ no quantity may represent a number greater\ + \ than 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers larger\ + \ or more precise will be capped or rounded\ + \ up. (E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we\ + \ require larger or smaller quantities.\n\n\ + When a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had,\ + \ and will use the same type again when it\ + \ is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". 
This\ + \ means that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\n\ + - No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will\ + \ be omitted unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be serialized as\ + \ \"1536Mi\"\n\nNote that the quantity will\ + \ NEVER be internally represented by a floating\ + \ point number. That is the whole point of\ + \ this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical\ + \ form. (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these numbers\ + \ without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::=\ + \ \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\ + \n ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" ::=\ + \ | \ + \ ::= | |\ + \ ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M |\ + \ G | T | P | E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter\ + \ which of the three exponent forms is used,\ + \ no quantity may represent a number greater\ + \ than 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers larger\ + \ or more precise will be capped or rounded\ + \ up. (E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we\ + \ require larger or smaller quantities.\n\n\ + When a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had,\ + \ and will use the same type again when it\ + \ is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\n\ + - No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will\ + \ be omitted unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be serialized as\ + \ \"1536Mi\"\n\nNote that the quantity will\ + \ NEVER be internally represented by a floating\ + \ point number. That is the whole point of\ + \ this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical\ + \ form. 
(So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these numbers\ + \ without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string + securityContext: + description: SecurityContext holds security configuration + that will be applied to a container. Some fields + are present in both SecurityContext and PodSecurityContext. When + both are set, the values in SecurityContext take + precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the + container process. AllowPrivilegeEscalation + is true always when the container is: 1) run + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name is + windows.' + type: boolean + capabilities: + description: Adds and removes POSIX capabilities + from running containers. + properties: + add: + description: Added capabilities + items: + type: string + type: array + drop: + description: Removed capabilities + items: + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. + Processes in privileged containers are essentially + equivalent to root on the host. Defaults to + false. Note that this field cannot be set when + spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default + is DefaultProcMount which uses the container + runtime defaults for readonly paths and masked + paths. This requires the ProcMountType feature + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. 
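+# Illustrative note (not part of the generated manifest): the resources block above
+# follows the standard Kubernetes ResourceRequirements shape, with limits and requests
+# expressed as Quantity strings. A minimal sketch of a conforming value, with
+# hypothetical amounts, could look like:
+#
+#   resources:
+#     requests:
+#       cpu: "500m"      # half a CPU, using the milli suffix described above
+#       memory: "512Mi"  # binary (power-of-two) suffix
+#     limits:
+#       cpu: "1"
+#       memory: "1Gi"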
Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of + the container process. Uses runtime default + if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. Note that this field cannot be set + when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must + run as a non-root user. If true, the Kubelet + will validate the image at runtime to ensure + that it does not run as UID 0 (root) and fail + to start the container if it does. If unset + or false, no such validation will be performed. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of + the container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note that + this field cannot be set when spec.os.name is + windows. + format: int64 + type: integer + seLinuxOptions: + description: SELinuxOptions are the labels to + be applied to the container + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: SeccompProfile defines a pod/container's + seccomp profile settings. Only one profile source + may be set. + properties: + localhostProfile: + description: localhostProfile indicates a + profile defined in a file on the node should + be used. The profile must be preconfigured + on the node to work. Must be a descending + path, relative to the kubelet's configured + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. + type: string + type: + description: 'type indicates which kind of + seccomp profile will be applied. Valid options + are: + + + Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile + should be used. Unconfined - no profile + should be applied.' + type: string + required: + - type + type: object + windowsOptions: + description: WindowsSecurityContextOptions contain + Windows-specific options and credentials. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). 
+ In addition, if HostProcess is true then + HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. + Defaults to the user specified in image + metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. 
Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. If + this is not set, reads from stdin in the container + will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should + close the stdin channel after it has been opened + by a single attach. When stdin is true the stdin + stream will remain open across multiple attach sessions. + If stdinOnce is set to true, stdin is opened on + container start, is empty until the first client + attaches to stdin, and then remains open and accepts + data until the client disconnects, at which time + stdin is closed and remains closed until the container + is restarted. If this flag is false, a container + processes that reads from stdin will never receive + an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to + which the container''s termination message will + be written is mounted into the container''s filesystem. + Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message + should be populated. 
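+# Illustrative note (not part of the generated manifest): the liveness and startup
+# probe schemas above mirror the Kubernetes Probe type. A minimal sketch of a
+# conforming probe, with a hypothetical endpoint and timings, could look like:
+#
+#   startupProbe:
+#     httpGet:
+#       path: /healthz      # hypothetical path
+#       port: 8080          # int-or-string, so a named port would also be accepted
+#       scheme: HTTP
+#     initialDelaySeconds: 5
+#     periodSeconds: 10     # defaults to 10 seconds when omitted
+#     failureThreshold: 3   # defaults to 3 when omitted
+#     timeoutSeconds: 1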
File will use the contents + of terminationMessagePath to populate the container + status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output + if the termination message file is empty and the + container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is + smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of + a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - name + - devicePath + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how + mounts are propagated from the host to container + and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. Behaves similarly to SubPath but + environment variable references $(VAR_NAME) + are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + type: array + workingDir: + description: Container's working directory. If not + specified, the container runtime's default will + be used, which might be configured in the container + image. Cannot be updated. + type: string + customVolumeMounts: + type: object + description: Custom Pod volumes to mount into the specified + container's filesystem. + additionalProperties: + type: array + description: Custom Pod volumes to mount into the specified + container's filesystem. + items: + description: 'VolumeMount describes a mounting of a + Volume within a container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core' + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the + container's environment. Defaults to "" (volume's + root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + customInitVolumeMounts: + type: object + description: Custom Pod volumes to mount into the specified + init container's filesystem. + additionalProperties: + type: array + description: Custom Pod volumes to mount into the specified + init container's filesystem. + items: + description: 'VolumeMount describes a mounting of a + Volume within a container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core' + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the + container's environment. Defaults to "" (volume's + root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + configurations: + type: object + description: 'Coordinator custom configurations. + + ' + properties: + sgPostgresConfig: + type: string + description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the cluster. It must exist. When not set, a default + Postgres config, for the major version selected, is used. + + + If sharding type is `shardingsphere` then this field is + ignored. + + + **Changing this field may require a restart.** + + ' + sgPoolingConfig: + type: string + description: 'Name of the [SGPoolingConfig](https://stackgres.io/doc/latest/reference/crd/sgpoolconfig) + used for this cluster. Each pod contains a sidecar with + a connection pooler (currently: [PgBouncer](https://www.pgbouncer.org/)). + The connection pooler is implemented as a sidecar. + + + If not set, a default configuration will be used. Disabling + connection pooling altogether is possible if the disableConnectionPooling + property of the pods object is set to true. + + + If sharding type is `shardingsphere` then this field is + ignored. 
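+# Illustrative note (not part of the generated manifest): customVolumeMounts and
+# customInitVolumeMounts above are maps whose values are lists of VolumeMount
+# objects, keyed by the container they target (the exact key semantics are an
+# assumption here; the names below are hypothetical). A minimal sketch:
+#
+#   customVolumeMounts:
+#     my-sidecar:                 # hypothetical custom container name
+#       - name: my-custom-volume  # must match the name of a declared volume
+#         mountPath: /var/lib/extra
+#         readOnly: true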
+ + + **Changing this field may require a restart.** + + ' + patroni: + type: object + description: 'Allow to specify Patroni configuration that + will extend the generated one + + + If sharding type is `shardingsphere` then this section + is ignored. + + ' + properties: + dynamicConfig: + type: object + description: 'Allow to specify Patroni dynamic configuration + that will overwrite the generated one. See https://patroni.readthedocs.io/en/latest/dynamic_configuration.html + + + The following configuration fields will be ignored: + + + * synchronous_mode + + * synchronous_mode_strict + + * failsafe_mode + + * postgresql + + * standby_cluster + + + If sharding type is `shardingsphere` then this section + is ignored. + + ' + x-kubernetes-preserve-unknown-fields: true + initialConfig: + type: object + description: 'Allow to specify Patroni configuration + that will overwrite the generated one. See https://patroni.readthedocs.io/en/latest/yaml_configuration.html + + + The following configuration fields will be ignored: + + + * name + + * namespace + + * log + + * bootstrap + + * citus + + * postgresql # with the exception of postgresql.callbacks, + postgresql.pre_promote, postgresql.before_stop and + postgresql.pg_ctl_timeout + + * restapi + + * ctl + + * watchdog + + * tags + + + If sharding type is `shardingsphere` then this section + is ignored. + + + **This field can only be set on creation.** + + ' + x-kubernetes-preserve-unknown-fields: true + shardingSphere: + type: object + description: 'Allow to specify Sharding Sphere Proxy configuration + that will extend the generated one. + + + This section is required when sharding type is `shardingsphere` + otherwise is ignored. + + ' + required: + - mode + properties: + version: + type: string + description: The version of the ShardingSphere Proxy. + If not specified latest version available will be + used. + mode: + type: object + description: Allow to configure the Sharding Shpere + Proxy mode. + required: + - type + - repository + properties: + type: + type: string + description: 'Allow to configure the Sharding Shpere + Proxy mode type. Options available are: + + + * `Standalone` + + * `Cluster` + + + When `Standalone` only 1 coordinator instance + may be set. + + ' + properties: + type: object + description: 'Properties that will be set in the + ShardingSphere Proxy configuration. + + + Some properties will be overwritten with the configuration + generated by the operator. In particular: + + + * `proxy-frontend-database-protocol-type` + + * `proxy-default-port` + + ' + x-kubernetes-preserve-unknown-fields: true + repository: + type: object + required: + - type + properties: + type: + type: string + description: 'Allow to configure the Sharding + Shpere Proxy repository type. Options available + are: + + + * `Memory` + + * `ZooKeeper` + + * `Etcd` + + + When `mode.type` is `standalone` then `repository.type` + must be memory. + + When `mode.type` is `cluster` then `repository.type` + could be any of zooKeeper or etcd. + + ' + properties: + type: object + description: 'Properties that will be set in + the ShardingSphere Proxy configuration for + the Repository. + + + Some properties will be overwritten with the + configuration generated by the operator. In + particular: + + + * `server-lists` + + ' + x-kubernetes-preserve-unknown-fields: true + zooKeeper: + type: object + description: Allow to configure ZooKeeper repository + for Sharding Shpere Proxy. 
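+# Illustrative note (not part of the generated manifest): the configurations section
+# above references SGPostgresConfig and SGPoolingConfig objects by name and can
+# extend the generated Patroni configuration. A minimal sketch, assuming it sits
+# under the coordinator section and that the referenced objects already exist:
+#
+#   configurations:
+#     sgPostgresConfig: my-postgres-config   # hypothetical SGPostgresConfig name
+#     sgPoolingConfig: my-pgbouncer-config   # hypothetical SGPoolingConfig name
+#     patroni:
+#       dynamicConfig:
+#         loop_wait: 10   # hypothetical Patroni dynamic setting, overriding the generated value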
+ required: + - serverList + properties: + serverList: + type: array + description: List of ZooKeeper servers to + connect to. + items: + type: string + description: ZooKeeper server to connect + to. + etcd: + type: object + description: Allow to configure Etcd repository + for Sharding Shpere Proxy. + required: + - serverList + properties: + serverList: + type: array + description: List of Etcd servers to connect + to. + items: + type: string + description: Etcd server to connect to. + properties: + type: object + description: 'Properties that will be set in the ShardingSphere + Proxy configuration. + + + Some properties will be overwritten with the configuration + generated by the operator. In particular: + + + * `server-lists` + + ' + x-kubernetes-preserve-unknown-fields: true + authority: + type: object + description: Allow to configure the Sharding Shpere + Proxy authority. + properties: + users: + type: array + description: 'Allow to configure extra users other + than the superuser (by default superuser username + is postgres). + + + See also https://shardingsphere.apache.org/document/current/en/user-manual/shardingsphere-proxy/yaml-config/authority/#authentication-configuration + + ' + items: + type: object + description: 'Allow to configure extra user other + than the superuser (by default superuser username + is postgres). + + ' + required: + - user + - password + properties: + user: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the username of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to + select from. Must be a valid secret + key. + password: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the password of the user. + + ' + required: + - name + - key + properties: + name: + type: string + description: Name of the referent. [More + information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to + select from. Must be a valid secret + key. + privilege: + type: object + description: Allow to configure the Sharding Shpere + Proxy authority privilege. + properties: + type: + type: string + description: 'Allow to configure the Sharding + Shpere Proxy authority privilege type. + + + See also https://shardingsphere.apache.org/document/current/en/user-manual/shardingsphere-proxy/yaml-config/authority/#authorization-configuration + + ' + userDatabaseMappings: + type: string + description: 'Allow to configure the mappings + between users and databases. 
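+# Illustrative note (not part of the generated manifest): when the sharding type is
+# `shardingsphere`, the section above configures the ShardingSphere Proxy mode,
+# repository and authority. A minimal sketch for a Cluster mode proxy backed by
+# ZooKeeper, with hypothetical names and hosts:
+#
+#   shardingSphere:
+#     mode:
+#       type: Cluster
+#       repository:
+#         type: ZooKeeper
+#         zooKeeper:
+#           serverList:
+#             - zookeeper-0.zookeeper:2181   # hypothetical ZooKeeper endpoint
+#     authority:
+#       users:
+#         - user:
+#             name: sharding-user-secret     # hypothetical Secret name
+#             key: username
+#           password:
+#             name: sharding-user-secret
+#             key: password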
+ + + See also https://shardingsphere.apache.org/document/current/en/user-manual/shardingsphere-proxy/yaml-config/authority/#database_permitted + + ' + serviceAccount: + type: object + description: "Section to configure ServiceAccount used\ + \ by ShardingSphere operator.\n\nYou may configure\ + \ a global value under operator configuration section\n\ + \ `SGConfig.spec.shardingSphere.serviceAccount`.\n" + required: + - namespace + - name + properties: + namespace: + type: string + description: The namespace of the ServiceAccount + used by ShardingSphere operator + name: + type: string + description: The name of the ServiceAccount used + by ShardingSphere operator + replication: + type: object + description: "This section allows to configure the global Postgres\ + \ replication mode.\n\nThe main replication group is implicit\ + \ and contains the total number of instances less the sum\ + \ of all\n instances in other replication groups.\n\nThe\ + \ total number of instances is always specified by `.spec.instances`.\n\ + \nIf sharding type is `shardingsphere` then this section is\ + \ ignored.\n" + properties: + mode: + type: string + description: 'The replication mode applied to the whole + cluster. + + Possible values are: + + * `async` (default) + + * `sync` + + * `strict-sync` + + * `sync-all` + + * `strict-sync-all` + + + **async** + + + When in asynchronous mode the cluster is allowed to lose + some committed transactions. + + When the primary server fails or becomes unavailable for + any other reason a sufficiently healthy standby + + will automatically be promoted to primary. Any transactions + that have not been replicated to that standby + + remain in a "forked timeline" on the primary, and are + effectively unrecoverable (the data is still there, + + but recovering it requires a manual recovery effort by + data recovery specialists). + + + **sync** + + + When in synchronous mode a standby will not be promoted + unless it is certain that the standby contains all + + transactions that may have returned a successful commit + status to client (clients can change the behavior + + per transaction using PostgreSQL’s `synchronous_commit` + setting. Transactions with `synchronous_commit` + + values of `off` and `local` may be lost on fail over, + but will not be blocked by replication delays). This + + means that the system may be unavailable for writes even + though some servers are available. System + + administrators can still use manual failover commands + to promote a standby even if it results in transaction + + loss. + + + Synchronous mode does not guarantee multi node durability + of commits under all circumstances. When no suitable + + standby is available, primary server will still accept + writes, but does not guarantee their replication. When + + the primary fails in this mode no standby will be promoted. + When the host that used to be the primary comes + + back it will get promoted automatically, unless system + administrator performed a manual failover. This behavior + + makes synchronous mode usable with 2 node clusters. + + + When synchronous mode is used and a standby crashes, commits + will block until the primary is switched to standalone + + mode. Manually shutting down or restarting a standby will + not cause a commit service interruption. Standby will + + signal the primary to release itself from synchronous + standby duties before PostgreSQL shutdown is initiated. 
+ + + **strict-sync** + + + When it is absolutely necessary to guarantee that each + write is stored durably on at least two nodes, use the + strict + + synchronous mode. This mode prevents synchronous replication + to be switched off on the primary when no synchronous + + standby candidates are available. As a downside, the primary + will not be available for writes (unless the Postgres + + transaction explicitly turns off `synchronous_mode` parameter), + blocking all client write requests until at least one + + synchronous replica comes up. + + + **Note**: Because of the way synchronous replication is + implemented in PostgreSQL it is still possible to lose + + transactions even when using strict synchronous mode. + If the PostgreSQL backend is cancelled while waiting to + acknowledge + + replication (as a result of packet cancellation due to + client timeout or backend failure) transaction changes + become + + visible for other backends. Such changes are not yet replicated + and may be lost in case of standby promotion. + + + **sync-all** + + + The same as `sync` but `syncInstances` is ignored and + the number of synchronous instances is equals to the total + number + + of instances less one. + + + **strict-sync-all** + + + The same as `strict-sync` but `syncInstances` is ignored + and the number of synchronous instances is equals to the + total number + + of instances less one. + + ' + default: sync-all + syncInstances: + type: integer + minimum: 1 + description: "Number of synchronous standby instances. Must\ + \ be less than the total number of instances. It is set\ + \ to 1 by default.\n Only setteable if mode is `sync`\ + \ or `strict-sync`.\n" + initialization: + type: object + description: 'Allow to specify how the replicas are initialized. + + ' + properties: + mode: + type: string + description: "Allow to specify how the replicas are\ + \ initialized.\n\nPossible values are:\n\n* `FromPrimary`:\ + \ When this mode is used replicas will be always created\ + \ from the primary using `pg_basebackup`.\n* `FromReplica`:\ + \ When this mode is used replicas will be created\ + \ from another existing replica using\n `pg_basebackup`.\ + \ Fallsback to `FromPrimary` if there's no replica\ + \ or it fails.\n* `FromExistingBackup`: When this\ + \ mode is used replicas will be created from an existing\ + \ SGBackup. If `backupNewerThan` is set\n the SGBackup\ + \ must be newer than its value. When this mode fails\ + \ to restore an SGBackup it will try with a previous\ + \ one (if exists).\n Fallsback to `FromReplica` if\ + \ there's no backup left or it fails.\n* `FromNewlyCreatedBackup`:\ + \ When this mode is used replicas will be created\ + \ from a newly created SGBackup.\n Fallsback to `FromExistingBackup`\ + \ if `backupNewerThan` is set and exists a recent\ + \ backup newer than its value or it fails.\n" + default: FromExistingBackup + backupNewerThan: + type: string + description: "An ISO 8601 duration in the format `PnDTnHnMn.nS`,\ + \ that specifies how old an SGBackup have to be in\ + \ order to be seleceted\n to initialize a replica.\n\ + \nWhen `FromExistingBackup` mode is set this field\ + \ restrict the selection of SGBackup to be used for\ + \ recovery newer than the\n specified value. 
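+# Illustrative note (not part of the generated manifest): the replication section
+# above selects the Postgres replication mode for the whole cluster and how replicas
+# are initialized. A minimal sketch (ignored when sharding type is `shardingsphere`),
+# with hypothetical values:
+#
+#   replication:
+#     mode: sync            # one of async, sync, strict-sync, sync-all, strict-sync-all
+#     syncInstances: 1      # only settable for sync / strict-sync modes
+#     initialization:
+#       mode: FromExistingBackup
+#       backupNewerThan: P1D   # ISO 8601 duration: only SGBackups newer than one day are considered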
\n\n\ + When `FromNewlyCreatedBackup` mode is set this field\ + \ skip the creation SGBackup to be used for recovery\ + \ if one newer than\n the specified value exists.\ + \ \n" + backupRestorePerformance: + type: object + description: 'Configuration that affects the backup + network and disk usage performance during recovery. + + ' + properties: + maxNetworkBandwidth: + type: integer + description: 'Maximum storage upload bandwidth used + when storing a backup. In bytes (per second). + + ' + maxDiskBandwidth: + type: integer + description: 'Maximum disk read I/O when performing + a backup. In bytes (per second). + + ' + downloadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to read the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to the minimum between the number of + file to read and 10. + + ' + metadata: + type: object + description: 'Metadata information from coordinator cluster + created resources. + + + If sharding type is `shardingsphere` then this section is + applied to the ComputeNode. + + ' + properties: + annotations: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + to be passed to resources created and managed by StackGres. + properties: + allResources: + type: object + description: Annotations to attach to any resource created + or managed by StackGres. + additionalProperties: + type: string + clusterPods: + type: object + description: Annotations to attach to pods created or + managed by StackGres. + additionalProperties: + type: string + services: + type: object + description: Annotations to attach to all services created + or managed by StackGres. + additionalProperties: + type: string + primaryService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-primary` service. + additionalProperties: + type: string + replicasService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-replicas` service. + additionalProperties: + type: string + labels: + type: object + description: Custom Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + to be passed to resources created and managed by StackGres. + properties: + clusterPods: + type: object + description: Labels to attach to Pods created or managed + by StackGres. + additionalProperties: + type: string + services: + type: object + description: Labels to attach to Services and Endpoints + created or managed by StackGres. + additionalProperties: + type: string + shards: + type: object + description: 'The shards are a group of StackGres clusters where + the partitioned data chunks are stored. + + + When referring to the cluster in the descriptions belove it apply + to any shard''s StackGres cluster. + + ' + required: + - clusters + - instancesPerCluster + - pods + properties: + clusters: + type: integer + minimum: 0 + description: 'Number of shard''s StackGres clusters + + ' + instancesPerCluster: + type: integer + minimum: 0 + description: "Number of StackGres instances per shard's StackGres\ + \ cluster. 
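+# Illustrative note (not part of the generated manifest): the shards section above
+# sizes the sharded cluster: `clusters` is the number of shard StackGres clusters and
+# `instancesPerCluster` the number of Postgres instances in each of them (`pods` is
+# also required, see further below). A minimal sketch with hypothetical numbers:
+#
+#   shards:
+#     clusters: 2             # two shard clusters
+#     instancesPerCluster: 3  # one primary plus two read-only replicas per shard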
Each instance contains one Postgres server.\n \ + \ Out of all of the Postgres servers, one is elected as the\ + \ primary, the rest remain as read-only replicas.\n" + autoscaling: + type: object + description: 'This section allows to configure vertical Pod + autoscaling for the SGCluster''s Pods. + + + Vertical Pod Autoscaling will use cpu and memory usage as + the metric to control the upscale or downscale of the Pod + requests and limits resources. + + Vertical Pod Autoscaling requires the [Vertical Pod Autoscaler + operator](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) + to be installed in the Kuberentes cluster. + + ' + properties: + mode: + type: string + description: 'Allow to enable or disable any of horizontal + and vertical Pod autoscaling. + + + Possible values are: + + * `vertical`: only vertical Pod autoscaling will be enabled + (default) + + * `none`: all autoscaling will be disabled + + ' + enum: + - vertical + - none + default: vertical + minAllowed: + type: object + description: 'Allow to define the lower bound for Pod resources + of patroni, pgbouncer and envoy containers + + ' + properties: + patroni: + type: object + description: 'Allow to define the lower bound for Pod + resources of patroni container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the patroni + container + memory: + type: string + description: The minimum allowed memory for the + patroni container + pgbouncer: + type: object + description: 'Allow to define the lower bound for Pod + resources of pgbouncer container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the pgbouncer + container + memory: + type: string + description: The minimum allowed memory for the + pgbouncer container + envoy: + type: object + description: 'Allow to define the lower bound for Pod + resources of envoy container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the envoy + container + memory: + type: string + description: The minimum allowed memory for the + envoy container + maxAllowed: + type: object + description: 'Allow to define the higher bound for Pod resources + of patroni, pgbouncer and envoy containers + + ' + properties: + patroni: + type: object + description: 'Allow to define the higher bound for Pod + resources of patroni container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the patroni + container + memory: + type: string + description: The maximum allowed memory for the + patroni container + pgbouncer: + type: object + description: 'Allow to define the higher bound for Pod + resources of pgbouncer container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the pgbouncer + container + memory: + type: string + description: The maximum allowed memory for the + pgbouncer container + envoy: + type: object + description: 'Allow to define the higher bound for Pod + resources of envoy container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the envoy + container + memory: + type: string + description: The maximum allowed memory for the + envoy container + horizontal: + type: object + description: 'Section to configure horizontal Pod autoscaling + aspects. + + ' + properties: + eplicasConnectionsUsageTarget: + type: string + description: 'The target value for replicas connections + used in order to trigger the upscale of replica instances. 
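+# Illustrative note (not part of the generated manifest): the autoscaling section
+# above enables vertical Pod autoscaling (which requires the Vertical Pod Autoscaler
+# operator) and bounds the recommended resources per container. A minimal sketch with
+# hypothetical bounds:
+#
+#   autoscaling:
+#     mode: vertical
+#     minAllowed:
+#       patroni:
+#         cpu: "500m"
+#         memory: "1Gi"
+#     maxAllowed:
+#       patroni:
+#         cpu: "4"
+#         memory: "8Gi"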
+ + ' + default: '0.8' + replicasConnectionsUsageMetricType: + type: string + description: 'The metric type for connections used metric. + See https://keda.sh/docs/latest/concepts/scaling-deployments/#triggers + + ' + default: AverageValue + cooldownPeriod: + type: integer + description: 'The period in seconds before the downscale + of replica instances can be triggered. + + ' + default: 300 + pollingInterval: + type: integer + description: 'The interval in seconds to check if the + scaleup or scaledown have to be triggered. + + ' + default: 30 + vertical: + type: object + description: 'Section to configure vertical Pod autoscaling + aspects. + + ' + properties: + recommender: + type: string + description: 'Recommender responsible for generating + recommendation for vertical Pod autoscaling. If not + specified the default one will be used. + + ' + sgInstanceProfile: + type: string + description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/reference/crd/sginstanceprofile/). + + + A SGInstanceProfile defines CPU and memory limits. Must exist + before creating a cluster. + + + When no profile is set, a default (1 core, 2 GiB RAM) one + is used. + + + **Changing this field may require a restart.** + + ' + managedSql: + type: object + description: 'This section allows to reference SQL scripts that + will be applied to the cluster live. + + ' + properties: + continueOnSGScriptError: + type: boolean + description: If true, when any entry of any `SGScript` fail + will not prevent subsequent `SGScript` from being executed. + By default is `false`. + scripts: + type: array + description: 'A list of script references that will be executed + in sequence. + + ' + items: + type: object + description: "A script reference. Each version of each\ + \ entry of the script referenced will be executed exactly\ + \ once following the sequence defined\n in the referenced\ + \ script and skipping any script entry that have already\ + \ been executed.\n" + properties: + id: + type: integer + description: The id is immutable and must be unique + across all the `SGScript` entries. It is replaced + by the operator and is used to identify the `SGScript` + entry. + sgScript: + type: string + description: A reference to an `SGScript` + pods: + type: object + description: Cluster pod's configuration. + required: + - persistentVolume + properties: + persistentVolume: + type: object + description: Pod's persistent volume configuration. + required: + - size + properties: + size: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$ + description: 'Size of the PersistentVolume set for each + instance of the cluster. This size is specified either + in Mebibytes, Gibibytes or Tebibytes (multiples of + 2^20, 2^30 or 2^40, respectively). + + ' + storageClass: + type: string + description: 'Name of an existing StorageClass in the + Kubernetes cluster, used to create the PersistentVolumes + for the instances of the cluster. + + ' + disableConnectionPooling: + type: boolean + description: 'If set to `true`, avoids creating a connection + pooling (using [PgBouncer](https://www.pgbouncer.org/)) + sidecar. + + + **Changing this field may require a restart.** + + ' + disableMetricsExporter: + type: boolean + description: '**Deprecated** use instead .spec.configurations.observability.disableMetrics. + + ' + disablePostgresUtil: + type: boolean + description: 'If set to `true`, avoids creating the `postgres-util` + sidecar. 
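+# Illustrative note (not part of the generated manifest): the sgInstanceProfile,
+# managedSql and pods sections above reference an SGInstanceProfile and SGScript
+# objects by name and configure per-instance storage and sidecars. A minimal sketch
+# with hypothetical names and sizes:
+#
+#   sgInstanceProfile: size-small        # hypothetical SGInstanceProfile name
+#   managedSql:
+#     scripts:
+#       - sgScript: create-app-schema    # hypothetical SGScript name
+#   pods:
+#     persistentVolume:
+#       size: 10Gi                       # must match ^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$
+#       storageClass: standard           # hypothetical StorageClass name
+#     disableConnectionPooling: false    # keep the PgBouncer sidecar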
This sidecar contains usual Postgres administration + utilities *that are not present in the main (`patroni`) + container*, like `psql`. Only disable if you know what + you are doing. + + + **Changing this field may require a restart.** + + ' + disableEnvoy: + type: boolean + description: 'If set to `true`, avoids creating the `envoy` + sidecar. This sidecar is used as the endge proxy for the + cluster''s Pods providing extra metrics to the monitoring + layer. + + + **Changing this field may require a restart.** + + ' + resources: + type: object + description: Pod custom resources configuration. + properties: + enableClusterLimitsRequirements: + type: boolean + description: 'When enabled resource limits for containers + other than the patroni container wil be set just like + for patroni contianer as specified in the SGInstanceProfile. + + + **Changing this field may require a restart.** + + ' + disableResourcesRequestsSplitFromTotal: + type: boolean + description: "When set to `true` the resources requests\ + \ values in fields `SGInstanceProfile.spec.requests.cpu`\ + \ and `SGInstanceProfile.spec.requests.memory` will\ + \ represent the resources\n requests of the patroni\ + \ container and the total resources requests calculated\ + \ by adding the resources requests of all the containers\ + \ (including the patroni container).\n\n**Changing\ + \ this field may require a restart.**\n" + failWhenTotalIsHigher: + type: boolean + description: "When set to `true` the reconciliation\ + \ of the cluster will fail if `disableResourcesRequestsSplitFromTotal`\ + \ is not set or set to `false` and the sum of the\ + \ CPU or memory\n of all the containers except patroni\ + \ is equals or higher than the total specified in\ + \ `SGInstanceProfile.spec.requests.cpu` or `SGInstanceProfile.spec.requests.memory`.\n\ + \nWhen `false` (the default) and `disableResourcesRequestsSplitFromTotal`\ + \ is not set or set to `false` and the sum of the\ + \ CPU or memory\n of all the containers except patroni\ + \ is equals or higher than the total specified in\ + \ `SGInstanceProfile.spec.requests.cpu` or `SGInstanceProfile.spec.requests.memory`\n\ + \ then the patroni container resources will be set\ + \ to 0.\n" + scheduling: + type: object + description: 'Pod custom scheduling, affinity and topology + spread constratins configuration. + + + **Changing this field may require a restart.** + + ' + properties: + nodeSelector: + type: object + additionalProperties: + type: string + description: 'NodeSelector is a selector which must + be true for the pod to fit on a node. Selector which + must match a node''s labels for the pod to be scheduled + on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + ' + tolerations: + description: 'If specified, the pod''s tolerations. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core' + items: + description: The pod this Toleration is attached to + tolerates any taint that matches the triple + using the matching operator . + properties: + effect: + description: Effect indicates the taint effect + to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; + this combination means to match all values and + all keys. 
+ type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and + Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the + period of time the toleration (which must be + of effect NoExecute, otherwise this field is + ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever + (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value + should be empty, otherwise just a regular string. + type: string + type: object + type: array + nodeAffinity: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node matches the corresponding + matchExpressions; the node(s) with the highest + sum are the most preferred. + items: + description: An empty preferred scheduling term + matches all objects with implicit weight 0 (i.e. + it's a no-op). A null preferred scheduling term + matches no objects (i.e. is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. 
+ type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the union + of the results of one or more label queries over + a set of nodes; that is, it represents the OR + of the selectors represented by the node selector + terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string + values. If the operator is In + or NotIn, the values array must + be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. If + the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. + "system-node-critical" and "system-cluster-critical" + are two special keywords which indicate the highest + priorities with the former being the highest priority. + Any other name must be defined by creating a PriorityClass + object with that name. If not specified, the pod priority + will be default or zero if there is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. 
A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. 
The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. If + the affinity requirements specified by this field + cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may + or may not try to eventually evict the pod from + its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. 
The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter + pod anti affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the one with + the greatest sum of weights, i.e. for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added per-node + to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. 
+ type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. If + the anti-affinity requirements specified by this + field cease to be met at some point during pod + execution (e.g. due to a pod label update), the + system may or may not try to eventually evict + the pod from its node. When there are multiple + elements, the lists of nodes corresponding to + each podAffinityTerm are intersected, i.e. all + terms must be satisfied. + items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. 
The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + topologySpreadConstraints: + description: 'TopologySpreadConstraints describes how + a group of pods ought to spread across topology domains. + Scheduler will schedule pods in a way which abides + by the constraints. All topologySpreadConstraints + are ANDed. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#topologyspreadconstraint-v1-core' + items: + description: TopologySpreadConstraint specifies how + to spread matching pods among the given topology. + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: 'MatchLabelKeys is a set of pod label + keys to select the pods over which spreading + will be calculated. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are ANDed with labelSelector to select + the group of existing pods over which spreading + will be calculated for the incoming pod. The + same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. MatchLabelKeys cannot be + set when LabelSelector isn''t set. Keys that + don''t exist in the incoming pod labels will + be ignored. A null or empty list means only + match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default).' + items: + type: string + type: array + maxSkew: + description: 'MaxSkew describes the degree to + which pods may be unevenly distributed. 
When + `whenUnsatisfiable=DoNotSchedule`, it is the + maximum permitted difference between the number + of matching pods in the target topology and + the global minimum. The global minimum is the + minimum number of matching pods in an eligible + domain or zero if the number of eligible domains + is less than MinDomains. For example, in a 3-zone + cluster, MaxSkew is set to 1, and pods with + the same labelSelector spread as 2/2/1: In this + case, the global minimum is 1. | zone1 | zone2 + | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies + that satisfy it. It''s a required field. Default + value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: 'MinDomains indicates a minimum number + of eligible domains. When the number of eligible + domains with matching topology keys is less + than minDomains, Pod Topology Spread treats + "global minimum" as 0, and then the calculation + of Skew is performed. And when the number of + eligible domains with matching topology keys + equals or greater than minDomains, this value + has no effect on scheduling. As a result, when + the number of eligible domains is less than + minDomains, scheduler won''t schedule more than + maxSkew Pods to those domains. If value is nil, + the constraint behaves as if MinDomains is equal + to 1. Valid values are integers greater than + 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is + set to 2, MinDomains is set to 5 and pods with + the same labelSelector spread as 2/2/2: | zone1 + | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), + so "global minimum" is treated as 0. In this + situation, new pod with the same labelSelector + cannot be scheduled, because computed skew will + be 3(3 - 0) if new Pod is scheduled to any of + the three zones, it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread + feature gate to be enabled (enabled by default).' + format: int32 + type: integer + nodeAffinityPolicy: + description: 'NodeAffinityPolicy indicates how + we will treat Pod''s nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options + are: - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: + nodeAffinity/nodeSelector are ignored. All nodes + are included in the calculations. + + + If this value is nil, the behavior is equivalent + to the Honor policy. This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + type: string + nodeTaintsPolicy: + description: 'NodeTaintsPolicy indicates how we + will treat node taints when calculating pod + topology spread skew. Options are: - Honor: + nodes without taints, along with tainted nodes + for which the incoming pod has a toleration, + are included. - Ignore: node taints are ignored. + All nodes are included. + + + If this value is nil, the behavior is equivalent + to the Ignore policy. This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' 
+ type: string + topologyKey: + description: TopologyKey is the key of node labels. + Nodes that have a label with this key and identical + values are considered to be in the same topology. + We consider each as a "bucket", + and try to put balanced number of pods into + each bucket. We define a domain as a particular + instance of a topology. Also, we define an eligible + domain as a domain whose nodes meet the requirements + of nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, + if TopologyKey is "topology.kubernetes.io/zone", + each zone is a domain of that topology. It's + a required field. + type: string + whenUnsatisfiable: + description: "WhenUnsatisfiable indicates how\ + \ to deal with a pod if it doesn't satisfy the\ + \ spread constraint. - DoNotSchedule (default)\ + \ tells the scheduler not to schedule it. -\ + \ ScheduleAnyway tells the scheduler to schedule\ + \ the pod in any location,\n but giving higher\ + \ precedence to topologies that would help reduce\ + \ the\n skew.\nA constraint is considered \"\ + Unsatisfiable\" for an incoming pod if and only\ + \ if every possible node assignment for that\ + \ pod would violate \"MaxSkew\" on some topology.\ + \ For example, in a 3-zone cluster, MaxSkew\ + \ is set to 1, and pods with the same labelSelector\ + \ spread as 3/1/1: | zone1 | zone2 | zone3 |\ + \ | P P P | P | P | If WhenUnsatisfiable\ + \ is set to DoNotSchedule, incoming pod can\ + \ only be scheduled to zone2(zone3) to become\ + \ 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3)\ + \ satisfies MaxSkew(1). In other words, the\ + \ cluster can still be imbalanced, but scheduler\ + \ won't make it *more* imbalanced. It's a required\ + \ field." + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + backup: + type: object + description: Backup Pod custom scheduling and affinity + configuration. + properties: + nodeSelector: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. 
+ items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the + union of the results of one or more label + queries over a set of nodes; that is, it represents + the OR of the selectors represented by the + node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. 
+ This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + tolerations: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the + union of the results of one or more label + queries over a set of nodes; that is, it represents + the OR of the selectors represented by the + node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. 
If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + nodeAffinity: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. 
+ If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the + union of the results of one or more label + queries over a set of nodes; that is, it represents + the OR of the selectors represented by the + node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. + "system-node-critical" and "system-cluster-critical" + are two special keywords which indicate the highest + priorities with the former being the highest priority. + Any other name must be defined by creating a PriorityClass + object with that name. 
If not specified, the pod + priority will be default or zero if there is no + default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod + affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest + sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) with, + where co-located is defined as running + on a node whose value of the label with + key matches that of any + node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label selector is a + label query over a set of resources. + The result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set + of pod label keys to select which + pods will be taken into consideration. 
+ The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with + `LabelSelector` as `key in (value)` + to select the group of existing + pods which pods will be taken into + consideration for the incoming pod's + pod (anti) affinity. Keys that don't + exist in the incoming pod labels + will be ignored. The default value + is empty. The same key is forbidden + to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is + a set of pod label keys to select + which pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with + `LabelSelector` as `key notin (value)` + to select the group of existing + pods which pods will be taken into + consideration for the incoming pod's + pod (anti) affinity. Keys that don't + exist in the incoming pod labels + will be ignored. The default value + is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a + label query over a set of resources. + The result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to a pod label update), + the system may or may not try to eventually + evict the pod from its node. When there are + multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration. 
The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter + pod anti affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity + expressions specified by this field, but it + may choose a node that violates one or more + of the expressions. The node that is most + preferred is the one with the greatest sum + of weights, i.e. for each node that meets + all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most + preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) with, + where co-located is defined as running + on a node whose value of the label with + key matches that of any + node on which a pod of the set of pods + is running + properties: + labelSelector: + description: A label selector is a + label query over a set of resources. + The result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". 
The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set + of pod label keys to select which + pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with + `LabelSelector` as `key in (value)` + to select the group of existing + pods which pods will be taken into + consideration for the incoming pod's + pod (anti) affinity. Keys that don't + exist in the incoming pod labels + will be ignored. The default value + is empty. The same key is forbidden + to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is + a set of pod label keys to select + which pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with + `LabelSelector` as `key notin (value)` + to select the group of existing + pods which pods will be taken into + consideration for the incoming pod's + pod (anti) affinity. Keys that don't + exist in the incoming pod labels + will be ignored. The default value + is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a + label query over a set of resources. + The result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. 
null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met at scheduling + time, the pod will not be scheduled onto the + node. If the anti-affinity requirements specified + by this field cease to be met at some point + during pod execution (e.g. due to a pod label + update), the system may or may not try to + eventually evict the pod from its node. When + there are multiple elements, the lists of + nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of + pod label keys to select which pods + will be taken into consideration. 
The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be + taken into consideration for the incoming + pod's pod (anti) affinity. Keys that + don't exist in the incoming pod labels + will be ignored. The default value is + empty. The same key is forbidden to + exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when + LabelSelector isn't set. This is an + alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set + of pod label keys to select which pods + will be taken into consideration. The + keys are used to lookup values from + the incoming pod labels, those key-value + labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods will + be taken into consideration for the + incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys and + LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions + are ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + managementPolicy: + type: string + description: "managementPolicy controls how pods are created\ + \ during initial scale up, when replacing pods\n on nodes,\ + \ or when scaling down. The default policy is `OrderedReady`,\ + \ where pods are created\n in increasing order (pod-0,\ + \ then pod-1, etc) and the controller will wait until\ + \ each pod is\n ready before continuing. When scaling\ + \ down, the pods are removed in the opposite order.\n\ + \ The alternative policy is `Parallel` which will create\ + \ pods in parallel to match the desired\n scale without\ + \ waiting, and on scale down will delete all pods at once.\n" + customVolumes: + type: array + description: "A list of custom volumes that may be used\ + \ along with any container defined in\n customInitContainers\ + \ or customContainers sections for the shards.\n\nThe\ + \ name used in this section will be prefixed with the\ + \ string `c-` so that when\n referencing them in the\ + \ customInitContainers or customContainers sections the\ + \ name used\n have to be prepended with the same prefix.\n\ + \nOnly the following volume types are allowed: configMap,\ + \ downwardAPI, emptyDir,\n gitRepo, glusterfs, hostPath,\ + \ nfs, projected and secret\n\n**Changing this field may\ + \ require a restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core\n" + items: + type: object + description: "A custom volume that may be used along with\ + \ any container defined in\n customInitContainers or\ + \ customContainers sections.\n\nThe name used in this\ + \ section will be prefixed with the string `c-` so that\ + \ when\n referencing them in the customInitContainers\ + \ or customContainers sections the name used\n have\ + \ to be prepended with the same prefix.\n\nOnly the\ + \ following volume types are allowed: configMap, downwardAPI,\ + \ emptyDir,\n gitRepo, glusterfs, hostPath, nfs, projected\ + \ and secret\n\n**Changing this field may require a\ + \ restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core\n" + properties: + name: + description: 'name of the custom volume. The name + will be implicitly prefixed with `c-` to avoid clashing + with internal operator volume names. Must be a DNS_LABEL + and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + ' + type: string + configMap: + description: 'Adapts a ConfigMap into a volume. + + + The contents of the target ConfigMap''s Data field + will be presented in a volume as files using the + keys in the Data field as the file names, unless + the items element is populated with specific mappings + of keys to paths. ConfigMap volumes support ownership + management and SELinux relabeling. 
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#configmapvolumesource-v1-core' + properties: + defaultMode: + description: 'defaultMode is optional: mode bits + used to set permissions on created files by + default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path + are not affected by this setting. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: 'DownwardAPIVolumeSource represents a + volume containing downward API info. Downward API + volumes support ownership management and SELinux + relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#downwardapivolumesource-v1-core' + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default. Must be a Optional: mode bits + used to set permissions on created files by + default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path + are not affected by this setting. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: ResourceFieldSelector represents + container resources (cpu, memory) and + their output format + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to\ + \ String() and AsInt64() accessors.\n\ + \nThe serialization format is:\n\n\ + ``` ::= \n\ + \n\t(Note that may be empty,\ + \ from the \"\" case in .)\n\ + \n ::= 0 | 1 | ...\ + \ | 9 ::= \ + \ | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki | Mi |\ + \ Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" |\ + \ k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I\ + \ didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo\ + \ matter which of the three exponent\ + \ forms is used, no quantity may represent\ + \ a number greater than 2^63-1 in\ + \ magnitude, nor may it have more\ + \ than 3 decimal places. Numbers larger\ + \ or more precise will be capped or\ + \ rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended\ + \ in the future if we require larger\ + \ or smaller quantities.\n\nWhen a\ + \ Quantity is parsed from a string,\ + \ it will remember the type of suffix\ + \ it had, and will use the same type\ + \ again when it is serialized.\n\n\ + Before serializing, Quantity will\ + \ be put in \"canonical form\". 
This\ + \ means that Exponent/suffix will\ + \ be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa)\ + \ such that:\n\n- No precision is\ + \ lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe\ + \ sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"\ + 1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented\ + \ by a floating point number. That\ + \ is the whole point of this exercise.\n\ + \nNon-canonical values will still\ + \ parse as long as they are well formed,\ + \ but will be re-emitted in their\ + \ canonical form. (So always use canonical\ + \ form, or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code\ + \ in the hopes that that will cause\ + \ implementors to also use a fixed\ + \ point implementation." + type: string + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'Represents an empty directory for a + pod. Empty directory volumes support ownership management + and SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#emptydirvolumesource-v1-core' + properties: + medium: + description: 'medium represents what type of storage + medium should back this directory. The default + is "" which means to use the node''s default + medium. Must be an empty string (default) or + Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::= \n\ + \n\t(Note that may be empty, from the\ + \ \"\" case in .)\n\n \ + \ ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | . |\ + \ . | . ::=\ + \ \"+\" | \"-\" ::= \ + \ | ::= \ + \ | | \ + \ ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G\ + \ | T | P | E\n\n\t(Note that 1024 = 1Ki but\ + \ 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter which\ + \ of the three exponent forms is used, no quantity\ + \ may represent a number greater than 2^63-1\ + \ in magnitude, nor may it have more than 3\ + \ decimal places. Numbers larger or more precise\ + \ will be capped or rounded up. (E.g.: 0.1m\ + \ will rounded up to 1m.) This may be extended\ + \ in the future if we require larger or smaller\ + \ quantities.\n\nWhen a Quantity is parsed from\ + \ a string, it will remember the type of suffix\ + \ it had, and will use the same type again when\ + \ it is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". 
This means\ + \ that Exponent/suffix will be adjusted up or\ + \ down (with a corresponding increase or decrease\ + \ in Mantissa) such that:\n\n- No precision\ + \ is lost - No fractional digits will be emitted\ + \ - The exponent (or suffix) is as large as\ + \ possible.\n\nThe sign will be omitted unless\ + \ the number is negative.\n\nExamples:\n\n-\ + \ 1.5 will be serialized as \"1500m\" - 1.5Gi\ + \ will be serialized as \"1536Mi\"\n\nNote that\ + \ the quantity will NEVER be internally represented\ + \ by a floating point number. That is the whole\ + \ point of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are well\ + \ formed, but will be re-emitted in their canonical\ + \ form. (So always use canonical form, or don't\ + \ diff.)\n\nThis format is intended to make\ + \ it difficult to use these numbers without\ + \ writing some sort of special handling code\ + \ in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + type: object + gitRepo: + description: 'Represents a volume that is populated + with the contents of a git repository. Git repo + volumes do not support ownership management. Git + repo volumes support SELinux relabeling. + + + DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into + an InitContainer that clones the repo using git, + then mount the EmptyDir into the Pod''s container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#gitrepovolumesource-v1-core' + properties: + directory: + description: directory is the target directory + name. Must not contain or start with '..'. If + '.' is supplied, the volume directory will be + the git repository. Otherwise, if specified, + the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Represents a Glusterfs mount that lasts + the lifetime of a pod. Glusterfs volumes do not + support ownership management or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#glusterfsvolumesource-v1-core' + properties: + endpoints: + description: 'endpoints is the endpoint name that + details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs + volume to be mounted with read-only permissions. + Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'Represents a host path mapped into a + pod. Host path volumes do not support ownership + management or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#hostpathvolumesource-v1-core' + properties: + path: + description: 'path of the directory on the host. + If the path is a symlink, it will follow the + link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults + to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + nfs: + description: 'Represents an NFS mount that lasts the + lifetime of a pod. NFS volumes do not support ownership + management or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nfsvolumesource-v1-core' + properties: + path: + description: 'path that is exported by the NFS + server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS + export to be mounted with read-only permissions. + Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address + of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - server + - path + type: object + projected: + description: 'Represents a projected volume source + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#projectedvolumesource-v1-core' + properties: + defaultMode: + description: defaultMode are the mode bits used + to set permissions on created files by default. + Must be an octal value between 0000 and 0777 + or a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires + decimal values for mode bits. Directories within + the path are not affected by this setting. This + might be in conflict with other options that + affect the file mode, like fsGroup, and the + result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundleProjection + describes how to select a set of ClusterTrustBundle + objects and project their contents into + the pod filesystem. + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The + result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector + requirement is a selector that + contains values, a key, and + an operator that relates the + key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In or + NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be + empty. This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. 
A single + {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator is + "In", and the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: If true, don't block pod + startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, + then the named ClusterTrustBundle + is allowed not to exist. If using + signerName, then the combination of + signerName and labelSelector is allowed + to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the + volume root to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles + that match this signer name. Mutually-exclusive + with name. The contents of all selected + ClusterTrustBundles will be unified + and deduplicated. + type: string + required: + - path + type: object + configMap: + description: 'Adapts a ConfigMap into a + projected volume. + + + The contents of the target ConfigMap''s + Data field will be presented in a projected + volume as files using the keys in the + Data field as the file names, unless the + items element is populated with specific + mappings of keys to paths. Note that this + is identical to a configmap volume source + without the default mode.' + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced ConfigMap will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will + be projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not + present in the ConfigMap, the volume + setup will error unless it is marked + optional. Paths must be relative and + may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key to + project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 + or a decimal value between 0 + and 511. YAML accepts both octal + and decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume + defaultMode will be used. This + might be in conflict with other + options that affect the file + mode, like fsGroup, and the + result can be other mode bits + set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the + key to. May not be an absolute + path. May not contain the path + element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be + defined + type: boolean + type: object + downwardAPI: + description: Represents downward API info + for projecting into a projected volume. + Note that this is identical to a downwardAPI + volume source without the default mode. 
+ properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile + represents information to create + the file containing the pod field + properties: + fieldRef: + description: ObjectFieldSelector + selects an APIVersioned field + of an object. + properties: + apiVersion: + description: Version of the + schema the FieldPath is + written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits + used to set permissions on this + file, must be an octal value + between 0000 and 0777 or a decimal + value between 0 and 511. YAML + accepts both octal and decimal + values, JSON requires decimal + values for mode bits. If not + specified, the volume defaultMode + will be used. This might be + in conflict with other options + that affect the file mode, like + fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. + Must be utf-8 encoded. The first + item of the relative path must + not start with ''..''' + type: string + resourceFieldRef: + description: ResourceFieldSelector + represents container resources + (cpu, memory) and their output + format + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + description: "Quantity is\ + \ a fixed-point representation\ + \ of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition\ + \ to String() and AsInt64()\ + \ accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\ + \n\t(Note that \ + \ may be empty, from the\ + \ \"\" case in .)\n\ + \n ::=\ + \ 0 | 1 | ... | 9 \ + \ ::= |\ + \ \ + \ ::= \ + \ | . |\ + \ . | .\ + \ ::=\ + \ \"+\" | \"-\" \ + \ ::= | \ + \ ::=\ + \ | \ + \ | \ + \ ::= Ki | Mi | Gi\ + \ | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See:\ + \ http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::=\ + \ m | \"\" | k | M | G |\ + \ T | P | E\n\n\t(Note that\ + \ 1024 = 1Ki but 1000 =\ + \ 1k; I didn't choose the\ + \ capitalization.)\n\n\ + \ ::= \"e\" \ + \ | \"E\" \ + \ ```\n\nNo matter which\ + \ of the three exponent\ + \ forms is used, no quantity\ + \ may represent a number\ + \ greater than 2^63-1 in\ + \ magnitude, nor may it\ + \ have more than 3 decimal\ + \ places. Numbers larger\ + \ or more precise will be\ + \ capped or rounded up.\ + \ (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be\ + \ extended in the future\ + \ if we require larger or\ + \ smaller quantities.\n\n\ + When a Quantity is parsed\ + \ from a string, it will\ + \ remember the type of suffix\ + \ it had, and will use the\ + \ same type again when it\ + \ is serialized.\n\nBefore\ + \ serializing, Quantity\ + \ will be put in \"canonical\ + \ form\". 
This means that\ + \ Exponent/suffix will be\ + \ adjusted up or down (with\ + \ a corresponding increase\ + \ or decrease in Mantissa)\ + \ such that:\n\n- No precision\ + \ is lost - No fractional\ + \ digits will be emitted\ + \ - The exponent (or suffix)\ + \ is as large as possible.\n\ + \nThe sign will be omitted\ + \ unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will\ + \ be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized\ + \ as \"1536Mi\"\n\nNote\ + \ that the quantity will\ + \ NEVER be internally represented\ + \ by a floating point number.\ + \ That is the whole point\ + \ of this exercise.\n\n\ + Non-canonical values will\ + \ still parse as long as\ + \ they are well formed,\ + \ but will be re-emitted\ + \ in their canonical form.\ + \ (So always use canonical\ + \ form, or don't diff.)\n\ + \nThis format is intended\ + \ to make it difficult to\ + \ use these numbers without\ + \ writing some sort of special\ + \ handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point\ + \ implementation." + type: string + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: 'Adapts a secret into a projected + volume. + + + The contents of the target Secret''s Data + field will be presented in a projected + volume as files using the keys in the + Data field as the file names. Note that + this is identical to a secret volume source + without the default mode.' + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced Secret will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will + be projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not + present in the Secret, the volume + setup will error unless it is marked + optional. Paths must be relative and + may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key to + project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 + or a decimal value between 0 + and 511. YAML accepts both octal + and decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume + defaultMode will be used. This + might be in conflict with other + options that affect the file + mode, like fsGroup, and the + result can be other mode bits + set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the + key to. May not be an absolute + path. May not contain the path + element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional field specify + whether the Secret or its key must + be defined + type: boolean + type: object + serviceAccountToken: + description: ServiceAccountTokenProjection + represents a projected service account + token volume. 
This projection can be used + to insert a service account token into + the pods runtime filesystem for use against + APIs (Kubernetes API Server or otherwise). + properties: + audience: + description: audience is the intended + audience of the token. A recipient + of a token must identify itself with + an identifier specified in the audience + of the token, and otherwise should + reject the token. The audience defaults + to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the + requested duration of validity of + the service account token. As the + token approaches expiration, the kubelet + volume plugin will proactively rotate + the service account token. The kubelet + will start trying to rotate the token + if the token is older than 80 percent + of its time to live or if the token + is older than 24 hours.Defaults to + 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative + to the mount point of the file to + project the token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + secret: + description: 'Adapts a Secret into a volume. + + + The contents of the target Secret''s Data field + will be presented in a volume as files using the + keys in the Data field as the file names. Secret + volumes support ownership management and SELinux + relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretvolumesource-v1-core' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits + used to set permissions on created files by + default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path + are not affected by this setting. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret + in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource references + the user''s PVC in the same namespace. This volume + finds the bound PV and mounts that volume for the + pod. A PersistentVolumeClaimVolumeSource is, essentially, + a wrapper around another type of volume that is + owned by someone else (the system). + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#persistentvolumeclaimvolumesource-v1-core' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this + volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly + setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + customInitContainers: + type: array + description: "A list of custom application init containers\ + \ that run within the coordinator cluster's Pods. The\n\ + \ custom init containers will run following the defined\ + \ sequence as the end of\n cluster's Pods init containers.\n\ + \nThe name used in this section will be prefixed with\ + \ the string `c-` so that when\n referencing them in\ + \ the .spec.containers section of SGInstanceProfile the\ + \ name used\n have to be prepended with the same prefix.\n\ + \n**Changing this field may require a restart.**\n\nSee:\ + \ https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\n" + items: + type: object + description: "A custom application init container that\ + \ run within the cluster's Pods. The custom init\n containers\ + \ will run following the defined sequence as the end\ + \ of cluster's Pods init\n containers.\n\nThe name used\ + \ in this section will be prefixed with the string `c-`\ + \ so that when\n referencing them in the .spec.containers\ + \ section of SGInstanceProfile the name used\n have\ + \ to be prepended with the same prefix.\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\\\ + n\n\n**Changing this field may require a restart.**\n" + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the + reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". Escaped + references will never be expanded, regardless of + whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The container image''s ENTRYPOINT is used + if this is not provided. 
Variable references $(VAR_NAME) + are expanded using the container''s environment. + If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ + are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped + references will never be expanded, regardless of + whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set + in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined + environment variables in the container and + any service environment variables. If a variable + cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: EnvVarSource represents a source + for the value of an EnvVar. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: ResourceFieldSelector represents + container resources (cpu, memory) and + their output format + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to\ + \ String() and AsInt64() accessors.\n\ + \nThe serialization format is:\n\n\ + ``` ::= \n\ + \n\t(Note that may be empty,\ + \ from the \"\" case in .)\n\ + \n ::= 0 | 1 | ...\ + \ | 9 ::= \ + \ | \ + \ ::= | .\ + \ | . | . 
\ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki | Mi |\ + \ Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" |\ + \ k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I\ + \ didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo\ + \ matter which of the three exponent\ + \ forms is used, no quantity may represent\ + \ a number greater than 2^63-1 in\ + \ magnitude, nor may it have more\ + \ than 3 decimal places. Numbers larger\ + \ or more precise will be capped or\ + \ rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended\ + \ in the future if we require larger\ + \ or smaller quantities.\n\nWhen a\ + \ Quantity is parsed from a string,\ + \ it will remember the type of suffix\ + \ it had, and will use the same type\ + \ again when it is serialized.\n\n\ + Before serializing, Quantity will\ + \ be put in \"canonical form\". This\ + \ means that Exponent/suffix will\ + \ be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa)\ + \ such that:\n\n- No precision is\ + \ lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe\ + \ sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"\ + 1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented\ + \ by a floating point number. That\ + \ is the whole point of this exercise.\n\ + \nNon-canonical values will still\ + \ parse as long as they are well formed,\ + \ but will be re-emitted in their\ + \ canonical form. (So always use canonical\ + \ form, or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code\ + \ in the hopes that that will cause\ + \ implementors to also use a fixed\ + \ point implementation." + type: string + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + secretKeyRef: + description: SecretKeySelector selects a + key of a Secret. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container + is starting. When a key exists in multiple sources, + the value associated with the last source will take + precedence. Values defined by an Env with a duplicate + key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: 'ConfigMapEnvSource selects a ConfigMap + to populate the environment variables with. + + + The contents of the target ConfigMap''s Data + field will represent the key-value pairs as + environment variables.' + properties: + name: + description: 'Name of the referent. 
More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: 'SecretEnvSource selects a Secret + to populate the environment variables with. + + + The contents of the target Secret''s Data + field will represent the key-value pairs as + environment variables.' + properties: + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images + in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag + is specified, or IfNotPresent otherwise. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle describes actions that the + management system should take in response to container + lifecycle events. For the PostStart and PreStop + lifecycle handlers, management of the container + blocks until the action is complete, unless the + container process fails, in which case the handler + is aborted. + properties: + postStart: + description: LifecycleHandler defines a specific + action that should be taken in a lifecycle hook. + One and only one of the fields, except TCPSocket + must be specified. + properties: + exec: + description: ExecAction describes a "run in + container" action. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. 
When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" + action. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: LifecycleHandler defines a specific + action that should be taken in a lifecycle hook. + One and only one of the fields, except TCPSocket + must be specified. + properties: + exec: + description: ExecAction describes a "run in + container" action. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" + action. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a + DNS_LABEL. Each container in a pod must have a unique + name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. + Not specifying a port here DOES NOT prevent that + port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container + will be accessible from the network. Modifying this + array with strategic merge patch may corrupt the + data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: Number of port to expose on the + pod's IP address. This must be a valid port + number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the + host. If specified, this must be a valid port + number, 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. 
+ format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port + in a pod must have a unique name. Name for + the port that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, + TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: 'Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. + + + This is an alpha field and requires enabling + the DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set + for containers.' + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. 
It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::=\ + \ \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\ + \n ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" ::=\ + \ | \ + \ ::= | |\ + \ ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M |\ + \ G | T | P | E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter\ + \ which of the three exponent forms is used,\ + \ no quantity may represent a number greater\ + \ than 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers larger\ + \ or more precise will be capped or rounded\ + \ up. (E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we\ + \ require larger or smaller quantities.\n\n\ + When a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had,\ + \ and will use the same type again when it\ + \ is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\n\ + - No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will\ + \ be omitted unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be serialized as\ + \ \"1536Mi\"\n\nNote that the quantity will\ + \ NEVER be internally represented by a floating\ + \ point number. That is the whole point of\ + \ this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical\ + \ form. (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these numbers\ + \ without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::=\ + \ \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\ + \n ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" ::=\ + \ | \ + \ ::= | |\ + \ ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M |\ + \ G | T | P | E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter\ + \ which of the three exponent forms is used,\ + \ no quantity may represent a number greater\ + \ than 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers larger\ + \ or more precise will be capped or rounded\ + \ up. 
(E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we\ + \ require larger or smaller quantities.\n\n\ + When a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had,\ + \ and will use the same type again when it\ + \ is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\n\ + - No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will\ + \ be omitted unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be serialized as\ + \ \"1536Mi\"\n\nNote that the quantity will\ + \ NEVER be internally represented by a floating\ + \ point number. That is the whole point of\ + \ this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical\ + \ form. (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these numbers\ + \ without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string + securityContext: + description: SecurityContext holds security configuration + that will be applied to a container. Some fields + are present in both SecurityContext and PodSecurityContext. When + both are set, the values in SecurityContext take + precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the + container process. 
AllowPrivilegeEscalation + is true always when the container is: 1) run + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name is + windows.' + type: boolean + capabilities: + description: Adds and removes POSIX capabilities + from running containers. + properties: + add: + description: Added capabilities + items: + type: string + type: array + drop: + description: Removed capabilities + items: + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. + Processes in privileged containers are essentially + equivalent to root on the host. Defaults to + false. Note that this field cannot be set when + spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default + is DefaultProcMount which uses the container + runtime defaults for readonly paths and masked + paths. This requires the ProcMountType feature + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of + the container process. Uses runtime default + if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. Note that this field cannot be set + when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must + run as a non-root user. If true, the Kubelet + will validate the image at runtime to ensure + that it does not run as UID 0 (root) and fail + to start the container if it does. If unset + or false, no such validation will be performed. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of + the container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note that + this field cannot be set when spec.os.name is + windows. + format: int64 + type: integer + seLinuxOptions: + description: SELinuxOptions are the labels to + be applied to the container + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: SeccompProfile defines a pod/container's + seccomp profile settings. Only one profile source + may be set. + properties: + localhostProfile: + description: localhostProfile indicates a + profile defined in a file on the node should + be used. The profile must be preconfigured + on the node to work. Must be a descending + path, relative to the kubelet's configured + seccomp profile location. Must be set if + type is "Localhost". 
Must NOT be set for + any other type. + type: string + type: + description: 'type indicates which kind of + seccomp profile will be applied. Valid options + are: + + + Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile + should be used. Unconfined - no profile + should be applied.' + type: string + required: + - type + type: object + windowsOptions: + description: WindowsSecurityContextOptions contain + Windows-specific options and credentials. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true then + HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. + Defaults to the user specified in image + metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. If + this is not set, reads from stdin in the container + will always result in EOF. Default is false. 
+ type: boolean + stdinOnce: + description: Whether the container runtime should + close the stdin channel after it has been opened + by a single attach. When stdin is true the stdin + stream will remain open across multiple attach sessions. + If stdinOnce is set to true, stdin is opened on + container start, is empty until the first client + attaches to stdin, and then remains open and accepts + data until the client disconnects, at which time + stdin is closed and remains closed until the container + is restarted. If this flag is false, a container + processes that reads from stdin will never receive + an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to + which the container''s termination message will + be written is mounted into the container''s filesystem. + Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message + should be populated. File will use the contents + of terminationMessagePath to populate the container + status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output + if the termination message file is empty and the + container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is + smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of + a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - name + - devicePath + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how + mounts are propagated from the host to container + and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. Behaves similarly to SubPath but + environment variable references $(VAR_NAME) + are expanded using the container's environment. 
+ Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + type: array + workingDir: + description: Container's working directory. If not + specified, the container runtime's default will + be used, which might be configured in the container + image. Cannot be updated. + type: string + customContainers: + type: array + description: "A list of custom application containers that\ + \ run within the shards cluster's Pods.\n\nThe name used\ + \ in this section will be prefixed with the string `c-`\ + \ so that when\n referencing them in the .spec.containers\ + \ section of SGInstanceProfile the name used\n have to\ + \ be prepended with the same prefix.\n\n**Changing this\ + \ field may require a restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\n" + items: + type: object + description: "A custom application container that run\ + \ within the cluster's Pods. The custom\n containers\ + \ will run following the defined sequence as the end\ + \ of cluster's Pods\n containers.\n\nThe name used in\ + \ this section will be prefixed with the string `c-`\ + \ so that when\n referencing them in the .spec.containers\ + \ section of SGInstanceProfile the name used\n have\ + \ to be prepended with the same prefix.\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\\\ + n\n\n**Changing this field may require a restart.**\n" + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the + reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". Escaped + references will never be expanded, regardless of + whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The container image''s ENTRYPOINT is used + if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. + If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ + are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped + references will never be expanded, regardless of + whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set + in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined + environment variables in the container and + any service environment variables. 
If a variable + cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: EnvVarSource represents a source + for the value of an EnvVar. + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: ResourceFieldSelector represents + container resources (cpu, memory) and + their output format + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to\ + \ String() and AsInt64() accessors.\n\ + \nThe serialization format is:\n\n\ + ``` ::= \n\ + \n\t(Note that may be empty,\ + \ from the \"\" case in .)\n\ + \n ::= 0 | 1 | ...\ + \ | 9 ::= \ + \ | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki | Mi |\ + \ Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" |\ + \ k | M | G | T | P | E\n\n\t(Note\ + \ that 1024 = 1Ki but 1000 = 1k; I\ + \ didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo\ + \ matter which of the three exponent\ + \ forms is used, no quantity may represent\ + \ a number greater than 2^63-1 in\ + \ magnitude, nor may it have more\ + \ than 3 decimal places. Numbers larger\ + \ or more precise will be capped or\ + \ rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended\ + \ in the future if we require larger\ + \ or smaller quantities.\n\nWhen a\ + \ Quantity is parsed from a string,\ + \ it will remember the type of suffix\ + \ it had, and will use the same type\ + \ again when it is serialized.\n\n\ + Before serializing, Quantity will\ + \ be put in \"canonical form\". This\ + \ means that Exponent/suffix will\ + \ be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa)\ + \ such that:\n\n- No precision is\ + \ lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe\ + \ sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"\ + 1536Mi\"\n\nNote that the quantity\ + \ will NEVER be internally represented\ + \ by a floating point number. 
That\ + \ is the whole point of this exercise.\n\ + \nNon-canonical values will still\ + \ parse as long as they are well formed,\ + \ but will be re-emitted in their\ + \ canonical form. (So always use canonical\ + \ form, or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code\ + \ in the hopes that that will cause\ + \ implementors to also use a fixed\ + \ point implementation." + type: string + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + secretKeyRef: + description: SecretKeySelector selects a + key of a Secret. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container + is starting. When a key exists in multiple sources, + the value associated with the last source will take + precedence. Values defined by an Env with a duplicate + key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: 'ConfigMapEnvSource selects a ConfigMap + to populate the environment variables with. + + + The contents of the target ConfigMap''s Data + field will represent the key-value pairs as + environment variables.' + properties: + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: 'SecretEnvSource selects a Secret + to populate the environment variables with. + + + The contents of the target Secret''s Data + field will represent the key-value pairs as + environment variables.' + properties: + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images + in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag + is specified, or IfNotPresent otherwise. Cannot + be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle describes actions that the + management system should take in response to container + lifecycle events. For the PostStart and PreStop + lifecycle handlers, management of the container + blocks until the action is complete, unless the + container process fails, in which case the handler + is aborted. + properties: + postStart: + description: LifecycleHandler defines a specific + action that should be taken in a lifecycle hook. + One and only one of the fields, except TCPSocket + must be specified. + properties: + exec: + description: ExecAction describes a "run in + container" action. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" + action. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: LifecycleHandler defines a specific + action that should be taken in a lifecycle hook. + One and only one of the fields, except TCPSocket + must be specified. 
+ properties: + exec: + description: ExecAction describes a "run in + container" action. + properties: + command: + description: Command is the command line + to execute inside the container, the + working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to explicitly + call out to that shell. Exit status + of 0 is treated as live/healthy and + non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a + custom header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon + output, so case-variant names + will be understood as the same + header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a "sleep" + action. + properties: + seconds: + description: Seconds is the number of + seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. 
Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a + DNS_LABEL. Each container in a pod must have a unique + name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. + Not specifying a port here DOES NOT prevent that + port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container + will be accessible from the network. Modifying this + array with strategic merge patch may corrupt the + data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: Number of port to expose on the + pod's IP address. This must be a valid port + number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the + host. If specified, this must be a valid port + number, 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port + in a pod must have a unique name. Name for + the port that can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be UDP, + TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). 
This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which + this resource resize policy applies. Supported + values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified + resource is resized. If not specified, it + defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: 'Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. + + + This is an alpha field and requires enabling + the DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set + for containers.' + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::=\ + \ \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\ + \n ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" ::=\ + \ | \ + \ ::= | |\ + \ ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M |\ + \ G | T | P | E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter\ + \ which of the three exponent forms is used,\ + \ no quantity may represent a number greater\ + \ than 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers larger\ + \ or more precise will be capped or rounded\ + \ up. (E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we\ + \ require larger or smaller quantities.\n\n\ + When a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had,\ + \ and will use the same type again when it\ + \ is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". 
This\ + \ means that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\n\ + - No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will\ + \ be omitted unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be serialized as\ + \ \"1536Mi\"\n\nNote that the quantity will\ + \ NEVER be internally represented by a floating\ + \ point number. That is the whole point of\ + \ this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical\ + \ form. (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these numbers\ + \ without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` ::=\ + \ \n\n\t(Note that \ + \ may be empty, from the \"\" case in .)\n\ + \n ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" ::=\ + \ | \ + \ ::= | |\ + \ ::= Ki | Mi\ + \ | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M |\ + \ G | T | P | E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose the capitalization.)\n\ + \n ::= \"e\" \ + \ | \"E\" ```\n\nNo matter\ + \ which of the three exponent forms is used,\ + \ no quantity may represent a number greater\ + \ than 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers larger\ + \ or more precise will be capped or rounded\ + \ up. (E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we\ + \ require larger or smaller quantities.\n\n\ + When a Quantity is parsed from a string, it\ + \ will remember the type of suffix it had,\ + \ and will use the same type again when it\ + \ is serialized.\n\nBefore serializing, Quantity\ + \ will be put in \"canonical form\". This\ + \ means that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\n\ + - No precision is lost - No fractional digits\ + \ will be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign will\ + \ be omitted unless the number is negative.\n\ + \nExamples:\n\n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be serialized as\ + \ \"1536Mi\"\n\nNote that the quantity will\ + \ NEVER be internally represented by a floating\ + \ point number. That is the whole point of\ + \ this exercise.\n\nNon-canonical values will\ + \ still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical\ + \ form. 
(So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these numbers\ + \ without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. Requests cannot + exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior + of individual containers in a pod. This field may + only be set for init containers, and the only allowed + value is "Always". For non-init containers or when + this field is not specified, the restart behavior + is defined by the Pod''s restart policy and the + container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: + this init container will be continually restarted + on exit until all regular containers have terminated. + Once all regular containers have completed, all + init containers with restartPolicy "Always" will + be shut down. This lifecycle differs from normal + init containers and is often referred to as a "sidecar" + container. Although this init container still starts + in the init container sequence, it does not wait + for the container to complete before proceeding + to the next init container. Instead, the next init + container starts immediately after this init container + is started, or after any startupProbe has successfully + completed.' + type: string + securityContext: + description: SecurityContext holds security configuration + that will be applied to a container. Some fields + are present in both SecurityContext and PodSecurityContext. When + both are set, the values in SecurityContext take + precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the + container process. AllowPrivilegeEscalation + is true always when the container is: 1) run + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name is + windows.' + type: boolean + capabilities: + description: Adds and removes POSIX capabilities + from running containers. + properties: + add: + description: Added capabilities + items: + type: string + type: array + drop: + description: Removed capabilities + items: + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. + Processes in privileged containers are essentially + equivalent to root on the host. Defaults to + false. Note that this field cannot be set when + spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default + is DefaultProcMount which uses the container + runtime defaults for readonly paths and masked + paths. This requires the ProcMountType feature + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. 
Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of + the container process. Uses runtime default + if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. Note that this field cannot be set + when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must + run as a non-root user. If true, the Kubelet + will validate the image at runtime to ensure + that it does not run as UID 0 (root) and fail + to start the container if it does. If unset + or false, no such validation will be performed. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of + the container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. Note that + this field cannot be set when spec.os.name is + windows. + format: int64 + type: integer + seLinuxOptions: + description: SELinuxOptions are the labels to + be applied to the container + properties: + level: + description: Level is SELinux level label + that applies to the container. + type: string + role: + description: Role is a SELinux role label + that applies to the container. + type: string + type: + description: Type is a SELinux type label + that applies to the container. + type: string + user: + description: User is a SELinux user label + that applies to the container. + type: string + type: object + seccompProfile: + description: SeccompProfile defines a pod/container's + seccomp profile settings. Only one profile source + may be set. + properties: + localhostProfile: + description: localhostProfile indicates a + profile defined in a file on the node should + be used. The profile must be preconfigured + on the node to work. Must be a descending + path, relative to the kubelet's configured + seccomp profile location. Must be set if + type is "Localhost". Must NOT be set for + any other type. + type: string + type: + description: 'type indicates which kind of + seccomp profile will be applied. Valid options + are: + + + Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile + should be used. Unconfined - no profile + should be applied.' + type: string + required: + - type + type: object + windowsOptions: + description: WindowsSecurityContextOptions contain + Windows-specific options and credentials. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the + name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + All of a Pod's containers must have the + same effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). 
+ In addition, if HostProcess is true then + HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. + Defaults to the user specified in image + metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probe describes a health check to be + performed against a container to determine whether + it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run in container" + action. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside a + shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you + need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for + the probe to be considered failed after having + succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of the service + to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior + is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an action + based on HTTP Get requests. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name. + This will be canonicalized upon output, + so case-variant names will be understood + as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. 
Minimum value + is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for + the probe to be considered successful after + having failed. Defaults to 1. Must be 1 for + liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an action + based on opening a socket + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that can + hold an int32 or a string. When used in + JSON or YAML marshalling and unmarshalling, + it produces or consumes the inner type. This + allows you to have, for example, a JSON + field that can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the + pod needs to terminate gracefully upon probe + failure. The grace period is the duration in + seconds after the processes running in the pod + are sent a termination signal and the time when + the processes are forcibly halted with a kill + signal. Set this value longer than the expected + cleanup time for your process. If this value + is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature + gate. Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. If + this is not set, reads from stdin in the container + will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should + close the stdin channel after it has been opened + by a single attach. When stdin is true the stdin + stream will remain open across multiple attach sessions. + If stdinOnce is set to true, stdin is opened on + container start, is empty until the first client + attaches to stdin, and then remains open and accepts + data until the client disconnects, at which time + stdin is closed and remains closed until the container + is restarted. If this flag is false, a container + processes that reads from stdin will never receive + an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to + which the container''s termination message will + be written is mounted into the container''s filesystem. + Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message + should be populated. 
File will use the contents + of terminationMessagePath to populate the container + status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output + if the termination message file is empty and the + container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is + smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of + a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - name + - devicePath + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how + mounts are propagated from the host to container + and the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. Behaves similarly to SubPath but + environment variable references $(VAR_NAME) + are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + type: array + workingDir: + description: Container's working directory. If not + specified, the container runtime's default will + be used, which might be configured in the container + image. Cannot be updated. + type: string + customVolumeMounts: + type: object + description: Custom Pod volumes to mount into the specified + container's filesystem. + additionalProperties: + type: array + description: Custom Pod volumes to mount into the specified + container's filesystem. + items: + description: 'VolumeMount describes a mounting of a + Volume within a container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core' + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the + container's environment. Defaults to "" (volume's + root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + customInitVolumeMounts: + type: object + description: Custom Pod volumes to mount into the specified + init container's filesystem. + additionalProperties: + type: array + description: Custom Pod volumes to mount into the specified + init container's filesystem. + items: + description: 'VolumeMount describes a mounting of a + Volume within a container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core' + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which the + container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable + references $(VAR_NAME) are expanded using the + container's environment. Defaults to "" (volume's + root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + configurations: + type: object + description: 'Shards custom configurations. + + ' + properties: + sgPostgresConfig: + type: string + description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the cluster. It must exist. When not set, a default + Postgres config, for the major version selected, is used. + + + **Changing this field may require a restart.** + + ' + sgPoolingConfig: + type: string + description: 'Name of the [SGPoolingConfig](https://stackgres.io/doc/latest/reference/crd/sgpoolconfig) + used for this cluster. Each pod contains a sidecar with + a connection pooler (currently: [PgBouncer](https://www.pgbouncer.org/)). + The connection pooler is implemented as a sidecar. + + + If not set, a default configuration will be used. Disabling + connection pooling altogether is possible if the disableConnectionPooling + property of the pods object is set to true. + + + **Changing this field may require a restart.** + + ' + patroni: + type: object + description: 'Allow to specify Patroni configuration that + will extend the generated one + + + If sharding type is `shardingsphere` then this section + is ignored. 
+ + ' + properties: + dynamicConfig: + type: object + description: 'Allow to specify Patroni dynamic configuration + that will overwrite the generated one. See https://patroni.readthedocs.io/en/latest/dynamic_configuration.html + + + The following configuration fields will be ignored: + + + * synchronous_mode + + * synchronous_mode_strict + + * failsafe_mode + + * postgresql + + * standby_cluster + + + If sharding type is `shardingsphere` then this section + is ignored. + + ' + x-kubernetes-preserve-unknown-fields: true + initialConfig: + type: object + description: 'Allow to specify Patroni configuration + that will overwrite the generated one. See https://patroni.readthedocs.io/en/latest/yaml_configuration.html + + + The following configuration fields will be ignored: + + + * name + + * namespace + + * log + + * bootstrap + + * citus + + * postgresql # with the exception of postgresql.callbacks, + postgresql.pre_promote, postgresql.before_stop and + postgresql.pg_ctl_timeout + + * restapi + + * ctl + + * watchdog + + * tags + + + If sharding type is `shardingsphere` then this section + is ignored. + + + **This field can only be set on creation.** + + ' + x-kubernetes-preserve-unknown-fields: true + replication: + type: object + description: "This section allows to configure the global Postgres\ + \ replication mode.\n\nThe main replication group is implicit\ + \ and contains the total number of instances less the sum\ + \ of all\n instances in other replication groups.\n\nThe\ + \ total number of instances is always specified by `.spec.instances`.\n" + properties: + mode: + type: string + description: "The replication mode applied to the whole\ + \ cluster.\nPossible values are:\n* `async` (default)\n\ + * `sync`\n* `strict-sync`\n* `sync-all`\n* `strict-sync-all`\n\ + \n**async**\n\nWhen in asynchronous mode the cluster is\ + \ allowed to lose some committed transactions.\n When\ + \ the primary server fails or becomes unavailable for\ + \ any other reason a sufficiently healthy standby\n will\ + \ automatically be promoted to primary. Any transactions\ + \ that have not been replicated to that standby\n remain\ + \ in a \"forked timeline\" on the primary, and are effectively\ + \ unrecoverable (the data is still there,\n but recovering\ + \ it requires a manual recovery effort by data recovery\ + \ specialists).\n\n**sync**\n\nWhen in synchronous mode\ + \ a standby will not be promoted unless it is certain\ + \ that the standby contains all\n transactions that may\ + \ have returned a successful commit status to client (clients\ + \ can change the behavior\n per transaction using PostgreSQL’s\ + \ `synchronous_commit` setting. Transactions with `synchronous_commit`\n\ + \ values of `off` and `local` may be lost on fail over,\ + \ but will not be blocked by replication delays). This\n\ + \ means that the system may be unavailable for writes\ + \ even though some servers are available. System\n administrators\ + \ can still use manual failover commands to promote a\ + \ standby even if it results in transaction\n loss.\n\ + \nSynchronous mode does not guarantee multi node durability\ + \ of commits under all circumstances. When no suitable\n\ + \ standby is available, primary server will still accept\ + \ writes, but does not guarantee their replication. When\n\ + \ the primary fails in this mode no standby will be promoted.\ + \ When the host that used to be the primary comes\n back\ + \ it will get promoted automatically, unless system administrator\ + \ performed a manual failover. 
This behavior\n makes\ + \ synchronous mode usable with 2 node clusters.\n\nWhen\ + \ synchronous mode is used and a standby crashes, commits\ + \ will block until the primary is switched to standalone\n\ + \ mode. Manually shutting down or restarting a standby\ + \ will not cause a commit service interruption. Standby\ + \ will\n signal the primary to release itself from synchronous\ + \ standby duties before PostgreSQL shutdown is initiated.\n\ + \n**strict-sync**\n\nWhen it is absolutely necessary to\ + \ guarantee that each write is stored durably on at least\ + \ two nodes, use the strict\n synchronous mode. This\ + \ mode prevents synchronous replication to be switched\ + \ off on the primary when no synchronous\n standby candidates\ + \ are available. As a downside, the primary will not be\ + \ available for writes (unless the Postgres\n transaction\ + \ explicitly turns off `synchronous_mode` parameter),\ + \ blocking all client write requests until at least one\n\ + \ synchronous replica comes up.\n\n**Note**: Because\ + \ of the way synchronous replication is implemented in\ + \ PostgreSQL it is still possible to lose\n transactions\ + \ even when using strict synchronous mode. If the PostgreSQL\ + \ backend is cancelled while waiting to acknowledge\n\ + \ replication (as a result of packet cancellation due\ + \ to client timeout or backend failure) transaction changes\ + \ become\n visible for other backends. Such changes are\ + \ not yet replicated and may be lost in case of standby\ + \ promotion.\n\n**sync-all**\n\nThe same as `sync` but\ + \ `syncInstances` is ignored and the number of synchronous\ + \ instances is equals to the total number\n of instances\ + \ less one.\n\n**strict-sync-all**\n\nThe same as `strict-sync`\ + \ but `syncInstances` is ignored and the number of synchronous\ + \ instances is equals to the total number\n of instances\ + \ less one.\n" + default: async + syncInstances: + type: integer + minimum: 1 + description: "Number of synchronous standby instances. Must\ + \ be less than the total number of instances. It is set\ + \ to 1 by default.\n Only setteable if mode is `sync`\ + \ or `strict-sync`.\n" + initialization: + type: object + description: 'Allow to specify how the replicas are initialized. + + ' + properties: + mode: + type: string + description: "Allow to specify how the replicas are\ + \ initialized.\n\nPossible values are:\n\n* `FromPrimary`:\ + \ When this mode is used replicas will be always created\ + \ from the primary using `pg_basebackup`.\n* `FromReplica`:\ + \ When this mode is used replicas will be created\ + \ from another existing replica using\n `pg_basebackup`.\ + \ Fallsback to `FromPrimary` if there's no replica\ + \ or it fails.\n* `FromExistingBackup`: When this\ + \ mode is used replicas will be created from an existing\ + \ SGBackup. If `backupNewerThan` is set\n the SGBackup\ + \ must be newer than its value. 
When this mode fails\ + \ to restore an SGBackup it will try with a previous\ + \ one (if exists).\n Fallsback to `FromReplica` if\ + \ there's no backup left or it fails.\n* `FromNewlyCreatedBackup`:\ + \ When this mode is used replicas will be created\ + \ from a newly created SGBackup.\n Fallsback to `FromExistingBackup`\ + \ if `backupNewerThan` is set and exists a recent\ + \ backup newer than its value or it fails.\n" + default: FromExistingBackup + backupNewerThan: + type: string + description: "An ISO 8601 duration in the format `PnDTnHnMn.nS`,\ + \ that specifies how old an SGBackup have to be in\ + \ order to be seleceted\n to initialize a replica.\n\ + \nWhen `FromExistingBackup` mode is set this field\ + \ restrict the selection of SGBackup to be used for\ + \ recovery newer than the\n specified value. \n\n\ + When `FromNewlyCreatedBackup` mode is set this field\ + \ skip the creation SGBackup to be used for recovery\ + \ if one newer than\n the specified value exists.\ + \ \n" + backupRestorePerformance: + type: object + description: 'Configuration that affects the backup + network and disk usage performance during recovery. + + ' + properties: + maxNetworkBandwidth: + type: integer + description: 'Maximum storage upload bandwidth used + when storing a backup. In bytes (per second). + + ' + maxDiskBandwidth: + type: integer + description: 'Maximum disk read I/O when performing + a backup. In bytes (per second). + + ' + downloadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several concurrent + streams to read the data. This parameter configures + the number of parallel streams to use. By default, + it''s set to the minimum between the number of + file to read and 10. + + ' + metadata: + type: object + description: Metadata information from shards cluster created + resources. + properties: + annotations: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + to be passed to resources created and managed by StackGres. + properties: + allResources: + type: object + description: Annotations to attach to any resource created + or managed by StackGres. + additionalProperties: + type: string + clusterPods: + type: object + description: Annotations to attach to pods created or + managed by StackGres. + additionalProperties: + type: string + services: + type: object + description: Annotations to attach to all services created + or managed by StackGres. + additionalProperties: + type: string + primaryService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-primary` service. + additionalProperties: + type: string + replicasService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-replicas` service. + additionalProperties: + type: string + labels: + type: object + description: Custom Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + to be passed to resources created and managed by StackGres. + properties: + clusterPods: + type: object + description: Labels to attach to Pods created or managed + by StackGres. + additionalProperties: + type: string + services: + type: object + description: Labels to attach to Services and Endpoints + created or managed by StackGres. 
+ additionalProperties: + type: string + overrides: + type: array + description: 'Any shard can be overridden by this section. + + ' + items: + type: object + description: 'Any shard can be overridden by this section. + + ' + required: + - index + properties: + index: + type: integer + minimum: 0 + description: 'Identifier of the shard StackGres cluster + to override (starting from 0) + + ' + instancesPerCluster: + type: integer + minimum: 0 + description: "Number of StackGres instances per shard's\ + \ StackGres cluster. Each instance contains one Postgres\ + \ server.\n Out of all of the Postgres servers, one\ + \ is elected as the primary, the rest remain as read-only\ + \ replicas.\n" + autoscaling: + type: object + description: 'This section allows to configure vertical + Pod autoscaling for the SGCluster''s Pods. + + + Vertical Pod Autoscaling will use CPU and memory usage + as the metric to control the upscale or downscale of + the Pod requests and limits resources. + + Vertical Pod Autoscaling requires the [Vertical Pod + Autoscaler operator](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) + to be installed in the Kubernetes cluster. + + ' + properties: + mode: + type: string + description: 'Allow to enable or disable any of horizontal + and vertical Pod autoscaling. + + + Possible values are: + + * `vertical`: only vertical Pod autoscaling will + be enabled (default) + + * `none`: all autoscaling will be disabled + + ' + enum: + - vertical + - none + default: vertical + minAllowed: + type: object + description: 'Allow to define the lower bound for + Pod resources of patroni, pgbouncer and envoy containers + + ' + properties: + patroni: + type: object + description: 'Allow to define the lower bound + for Pod resources of patroni container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the + patroni container + memory: + type: string + description: The minimum allowed memory for + the patroni container + pgbouncer: + type: object + description: 'Allow to define the lower bound + for Pod resources of pgbouncer container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the + pgbouncer container + memory: + type: string + description: The minimum allowed memory for + the pgbouncer container + envoy: + type: object + description: 'Allow to define the lower bound + for Pod resources of envoy container + + ' + properties: + cpu: + type: string + description: The minimum allowed CPU for the + envoy container + memory: + type: string + description: The minimum allowed memory for + the envoy container + maxAllowed: + type: object + description: 'Allow to define the higher bound for + Pod resources of patroni, pgbouncer and envoy containers + + ' + properties: + patroni: + type: object + description: 'Allow to define the higher bound + for Pod resources of patroni container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the + patroni container + memory: + type: string + description: The maximum allowed memory for + the patroni container + pgbouncer: + type: object + description: 'Allow to define the higher bound + for Pod resources of pgbouncer container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the + pgbouncer container + memory: + type: string + description: The maximum allowed memory for + the pgbouncer container + envoy: + type: object + description: 'Allow to define the higher bound + for Pod resources of 
envoy container + + ' + properties: + cpu: + type: string + description: The maximum allowed CPU for the + envoy container + memory: + type: string + description: The maximum allowed memory for + the envoy container + horizontal: + type: object + description: 'Section to configure horizontal Pod + autoscaling aspects. + + ' + properties: + eplicasConnectionsUsageTarget: + type: string + description: 'The target value for replicas connections + used in order to trigger the upscale of replica + instances. + + ' + default: '0.8' + replicasConnectionsUsageMetricType: + type: string + description: 'The metric type for the connections + used metric. See https://keda.sh/docs/latest/concepts/scaling-deployments/#triggers + + ' + default: AverageValue + cooldownPeriod: + type: integer + description: 'The period in seconds before the + downscale of replica instances can be triggered. + + ' + default: 300 + pollingInterval: + type: integer + description: 'The interval in seconds to check + if the scaleup or scaledown has to be triggered. + + ' + default: 30 + vertical: + type: object + description: 'Section to configure vertical Pod autoscaling + aspects. + + ' + properties: + recommender: + type: string + description: 'Recommender responsible for generating + recommendations for vertical Pod autoscaling. + If not specified, the default one will be used. + + ' + sgInstanceProfile: + type: string + description: 'Name of the [SGInstanceProfile](https://stackgres.io/doc/latest/04-postgres-cluster-management/03-resource-profiles/). + An SGInstanceProfile defines CPU and memory limits. Must + exist before creating a cluster. When no profile is + set, a default (currently: 1 core, 2 GiB RAM) one is + used. + + ' + managedSql: + type: object + description: 'This section allows to reference SQL scripts + that will be applied to the cluster live. + + ' + properties: + continueOnSGScriptError: + type: boolean + description: If true, a failure in any entry of any `SGScript` + will not prevent subsequent `SGScript` from + being executed. By default it is `false`. + scripts: + type: array + description: 'A list of script references that will + be executed in sequence. + + ' + items: + type: object + description: "A script reference. Each version of\ + \ each entry of the script referenced will be\ + \ executed exactly once following the sequence\ + \ defined\n in the referenced script and skipping\ + \ any script entry that has already been executed.\n" + properties: + id: + type: integer + description: The id is immutable and must be + unique across all the `SGScript` entries. + It is replaced by the operator and is used + to identify the `SGScript` entry. + sgScript: + type: string + description: A reference to an `SGScript` + pods: + type: object + description: Cluster pod's configuration. + properties: + persistentVolume: + type: object + description: Pod's persistent volume configuration. + required: + - size + properties: + size: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$ + description: 'Size of the PersistentVolume set + for each instance of the cluster. This size + is specified either in Mebibytes, Gibibytes + or Tebibytes (multiples of 2^20, 2^30 or 2^40, + respectively). + + ' + storageClass: + type: string + description: 'Name of an existing StorageClass + in the Kubernetes cluster, used to create the + PersistentVolumes for the instances of the cluster. 
+ + ' + disableConnectionPooling: + type: boolean + description: 'If set to `true`, avoids creating a + connection pooling (using [PgBouncer](https://www.pgbouncer.org/)) + sidecar. + + + **Changing this field may require a restart.** + + ' + disableMetricsExporter: + type: boolean + description: '**Deprecated**: use .spec.configurations.observability.disableMetrics instead. + + ' + disablePostgresUtil: + type: boolean + description: 'If set to `true`, avoids creating the + `postgres-util` sidecar. This sidecar contains usual + Postgres administration utilities *that are not + present in the main (`patroni`) container*, like + `psql`. Only disable if you know what you are doing. + + + **Changing this field may require a restart.** + + ' + disableEnvoy: + type: boolean + description: 'If set to `true`, avoids creating the + `envoy` sidecar. This sidecar is used as the edge + proxy for the cluster''s Pods, providing extra metrics + to the monitoring layer. + + + **Changing this field may require a restart.** + + ' + resources: + type: object + description: Pod custom resources configuration. + properties: + enableClusterLimitsRequirements: + type: boolean + description: 'When enabled, resource limits for + containers other than the patroni container + will be set just like for the patroni container, as + specified in the SGInstanceProfile. + + + **Changing this field may require a restart.** + + ' + disableResourcesRequestsSplitFromTotal: + type: boolean + description: "When set to `true` the resources\ + \ requests values in fields `SGInstanceProfile.spec.requests.cpu`\ + \ and `SGInstanceProfile.spec.requests.memory`\ + \ will represent the resources\n requests of\ + \ the patroni container and the total resources\ + \ requests calculated by adding the resources\ + \ requests of all the containers (including\ + \ the patroni container).\n\n**Changing this\ + \ field may require a restart.**\n" + failWhenTotalIsHigher: + type: boolean + description: "When set to `true` the reconciliation\ + \ of the cluster will fail if `disableResourcesRequestsSplitFromTotal`\ + \ is not set or set to `false` and the sum of\ + \ the CPU or memory\n of all the containers\ + \ except patroni is equal to or higher than the\ + \ total specified in `SGInstanceProfile.spec.requests.cpu`\ + \ or `SGInstanceProfile.spec.requests.memory`.\n\ + \nWhen `false` (the default) and `disableResourcesRequestsSplitFromTotal`\ + \ is not set or set to `false` and the sum of\ + \ the CPU or memory\n of all the containers\ + \ except patroni is equal to or higher than the\ + \ total specified in `SGInstanceProfile.spec.requests.cpu`\ + \ or `SGInstanceProfile.spec.requests.memory`\n\ + \ then the patroni container resources will\ + \ be set to 0.\n" + scheduling: + type: object + description: 'Pod custom scheduling, affinity and + topology spread constraints configuration. + + + **Changing this field may require a restart.** + + ' + properties: + nodeSelector: + type: object + additionalProperties: + type: string + description: 'NodeSelector is a selector which + must be true for the pod to fit on a node. Selector + which must match a node''s labels for the pod + to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + ' + tolerations: + description: 'If specified, the pod''s tolerations. 
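(Illustrative example, not part of the bundled manifests: a hedged sketch of a per-shard override using the fields described above; the exact nesting of `overrides` under the shards section is assumed and all values are hypothetical.)

overrides:
- index: 0                         # overrides the first shard's StackGres cluster
  instancesPerCluster: 3
  sgInstanceProfile: size-m        # hypothetical SGInstanceProfile; must exist beforehand
  pods:
    persistentVolume:
      size: 10Gi                   # must match ^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$
      storageClass: standard       # hypothetical StorageClass name
    disableConnectionPooling: false
    scheduling:
      nodeSelector:
        disktype: ssd              # hypothetical node label
      tolerations:
      - key: database
        operator: Exists
        effect: NoSchedule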
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core' + items: + description: The pod this Toleration is attached + to tolerates any taint that matches the triple + using the matching operator + . + properties: + effect: + description: Effect indicates the taint + effect to match. Empty means match all + taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Key is the taint key that the + toleration applies to. Empty means match + all taint keys. If the key is empty, operator + must be Exists; this combination means + to match all values and all keys. + type: string + operator: + description: Operator represents a key's + relationship to the value. Valid operators + are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints + of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents + the period of time the toleration (which + must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. + By default, it is not set, which means + tolerate the taint forever (do not evict). + Zero and negative values will be treated + as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the + toleration matches to. If the operator + is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + nodeAffinity: + description: 'Node affinity is a group of node + affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to + schedule pods to nodes that satisfy the + affinity expressions specified by this field, + but it may choose a node that violates one + or more of the expressions. The node that + is most preferred is the one with the greatest + sum of weights, i.e. for each node that + meets all of the scheduling requirements + (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if + the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the + most preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit + weight 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains + values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents a + key's relationship to a + set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string + values. 
If the operator + is In or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the values + array must be empty. If + the operator is Gt or Lt, + the values array must have + a single element, which + will be interpreted as an + integer. This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains + values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents a + key's relationship to a + set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string + values. If the operator + is In or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the values + array must be empty. If + the operator is Gt or Lt, + the values array must have + a single element, which + will be interpreted as an + integer. This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the + union of the results of one or more label + queries over a set of nodes; that is, it + represents the OR of the selectors represented + by the node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains + values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents a + key's relationship to a + set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string + values. If the operator + is In or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the values + array must be empty. If + the operator is Gt or Lt, + the values array must have + a single element, which + will be interpreted as an + integer. This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains + values, a key, and an operator + that relates the key and values. 
+ properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents a + key's relationship to a + set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string + values. If the operator + is In or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the values + array must be empty. If + the operator is Gt or Lt, + the values array must have + a single element, which + will be interpreted as an + integer. This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's + priority. "system-node-critical" and "system-cluster-critical" + are two special keywords which indicate the + highest priorities with the former being the + highest priority. Any other name must be defined + by creating a PriorityClass object with that + name. If not specified, the pod priority will + be default or zero if there is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter + pod affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to + schedule pods to nodes that satisfy the + affinity expressions specified by this field, + but it may choose a node that violates one + or more of the expressions. The node that + is most preferred is the one with the greatest + sum of weights, i.e. for each node that + meets all of the scheduling requirements + (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if + the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest + sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value of + the label with key matches + that of any node on which a pod of + the set of pods is running + properties: + labelSelector: + description: A label selector is + a label query over a set of resources. + The result of matchLabels and + matchExpressions are ANDed. An + empty label selector matches all + objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. 
+ Valid operators are + In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is + an array of string values. + If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. This array is + replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a + set of pod label keys to select + which pods will be taken into + consideration. The keys are used + to lookup values from the incoming + pod labels, those key-value labels + are merged with `LabelSelector` + as `key in (value)` to select + the group of existing pods which + pods will be taken into consideration + for the incoming pod's pod (anti) + affinity. Keys that don't exist + in the incoming pod labels will + be ignored. The default value + is empty. The same key is forbidden + to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is + a set of pod label keys to select + which pods will be taken into + consideration. The keys are used + to lookup values from the incoming + pod labels, those key-value labels + are merged with `LabelSelector` + as `key notin (value)` to select + the group of existing pods which + pods will be taken into consideration + for the incoming pod's pod (anti) + affinity. Keys that don't exist + in the incoming pod labels will + be ignored. The default value + is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is + a label query over a set of resources. + The result of matchLabels and + matchExpressions are ANDed. An + empty label selector matches all + objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is + an array of string values. + If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. This array is + replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The + term is applied to the union of + the namespaces listed in this + field and the ones selected by + namespaceSelector. null or empty + namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be + co-located (affinity) or not co-located + (anti-affinity) with the pods + matching the labelSelector in + the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not met at scheduling + time, the pod will not be scheduled onto + the node. If the affinity requirements specified + by this field cease to be met at some point + during pod execution (e.g. due to a pod + label update), the system may or may not + try to eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all + terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not + co-located (anti-affinity) with, where + co-located is defined as running on a + node whose value of the label with key + matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The + result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector + requirement is a selector that + contains values, a key, and + an operator that relates the + key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In or + NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be + empty. 
This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single + {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator is + "In", and the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set + of pod label keys to select which + pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with `LabelSelector` + as `key in (value)` to select the + group of existing pods which pods + will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MatchLabelKeys and + LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a + set of pod label keys to select which + pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods + will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The + result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector + requirement is a selector that + contains values, a key, and + an operator that relates the + key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In or + NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be + empty. This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. 
A single + {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator is + "In", and the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a + static list of namespace names that + the term applies to. The term is applied + to the union of the namespaces listed + in this field and the ones selected + by namespaceSelector. null or empty + namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running on + a node whose value of the label with + key topologyKey matches that of any + node on which any of the selected + pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of + inter pod anti affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to + schedule pods to nodes that satisfy the + anti-affinity expressions specified by this + field, but it may choose a node that violates + one or more of the expressions. The node + that is most preferred is the one with the + greatest sum of weights, i.e. for each node + that meets all of the scheduling requirements + (resource request, requiredDuringScheduling + anti-affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" to the + sum if the node has pods which matches the + corresponding podAffinityTerm; the node(s) + with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value of + the label with key matches + that of any node on which a pod of + the set of pods is running + properties: + labelSelector: + description: A label selector is + a label query over a set of resources. + The result of matchLabels and + matchExpressions are ANDed. An + empty label selector matches all + objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is + an array of string values. 
+ If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. This array is + replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a + set of pod label keys to select + which pods will be taken into + consideration. The keys are used + to lookup values from the incoming + pod labels, those key-value labels + are merged with `LabelSelector` + as `key in (value)` to select + the group of existing pods which + pods will be taken into consideration + for the incoming pod's pod (anti) + affinity. Keys that don't exist + in the incoming pod labels will + be ignored. The default value + is empty. The same key is forbidden + to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is + a set of pod label keys to select + which pods will be taken into + consideration. The keys are used + to lookup values from the incoming + pod labels, those key-value labels + are merged with `LabelSelector` + as `key notin (value)` to select + the group of existing pods which + pods will be taken into consideration + for the incoming pod's pod (anti) + affinity. Keys that don't exist + in the incoming pod labels will + be ignored. The default value + is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is + a label query over a set of resources. + The result of matchLabels and + matchExpressions are ANDed. An + empty label selector matches all + objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is + an array of string values. + If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. This array is + replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The + term is applied to the union of + the namespaces listed in this + field and the ones selected by + namespaceSelector. null or empty + namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be + co-located (affinity) or not co-located + (anti-affinity) with the pods + matching the labelSelector in + the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met at scheduling + time, the pod will not be scheduled onto + the node. If the anti-affinity requirements + specified by this field cease to be met + at some point during pod execution (e.g. + due to a pod label update), the system may + or may not try to eventually evict the pod + from its node. When there are multiple elements, + the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all + terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not + co-located (anti-affinity) with, where + co-located is defined as running on a + node whose value of the label with key + matches that of any node + on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The + result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector + requirement is a selector that + contains values, a key, and + an operator that relates the + key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In or + NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be + empty. 
This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single + {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator is + "In", and the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set + of pod label keys to select which + pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with `LabelSelector` + as `key in (value)` to select the + group of existing pods which pods + will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MatchLabelKeys and + LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a + set of pod label keys to select which + pods will be taken into consideration. + The keys are used to lookup values + from the incoming pod labels, those + key-value labels are merged with `LabelSelector` + as `key notin (value)` to select the + group of existing pods which pods + will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming + pod labels will be ignored. The default + value is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't + set. This is an alpha field and requires + enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label + query over a set of resources. The + result of matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null label + selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector + requirement is a selector that + contains values, a key, and + an operator that relates the + key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In or + NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be + empty. This array is replaced + during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. 
A single + {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator is + "In", and the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a + static list of namespace names that + the term applies to. The term is applied + to the union of the namespaces listed + in this field and the ones selected + by namespaceSelector. null or empty + namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running on + a node whose value of the label with + key topologyKey matches that of any + node on which any of the selected + pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + topologySpreadConstraints: + description: 'TopologySpreadConstraints describes + how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way + which abides by the constraints. All topologySpreadConstraints + are ANDed. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#topologyspreadconstraint-v1-core' + items: + description: TopologySpreadConstraint specifies + how to spread matching pods among the given + topology. + properties: + labelSelector: + description: A label selector is a label + query over a set of resources. The result + of matchLabels and matchExpressions are + ANDed. An empty label selector matches + all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + matchLabelKeys: + description: 'MatchLabelKeys is a set of + pod label keys to select the pods over + which spreading will be calculated. The + keys are used to lookup values from the + incoming pod labels, those key-value labels + are ANDed with labelSelector to select + the group of existing pods over which + spreading will be calculated for the incoming + pod. 
The same key is forbidden to exist + in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector + isn''t set. Keys that don''t exist in + the incoming pod labels will be ignored. + A null or empty list means only match + against labelSelector. + + + This is a beta field and requires the + MatchLabelKeysInPodTopologySpread feature + gate to be enabled (enabled by default).' + items: + type: string + type: array + maxSkew: + description: 'MaxSkew describes the degree + to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference + between the number of matching pods in + the target topology and the global minimum. + The global minimum is the minimum number + of matching pods in an eligible domain + or zero if the number of eligible domains + is less than MinDomains. For example, + in a 3-zone cluster, MaxSkew is set to + 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global + minimum is 1. | zone1 | zone2 | zone3 + | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled + to zone3 to become 2/2/2; scheduling it + onto zone1(zone2) would make the ActualSkew(3-1) + on zone1(zone2) violate MaxSkew(1). - + if MaxSkew is 2, incoming pod can be scheduled + onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to + topologies that satisfy it. It''s a required + field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + minDomains: + description: 'MinDomains indicates a minimum + number of eligible domains. When the number + of eligible domains with matching topology + keys is less than minDomains, Pod Topology + Spread treats "global minimum" as 0, and + then the calculation of Skew is performed. + And when the number of eligible domains + with matching topology keys equals or + greater than minDomains, this value has + no effect on scheduling. As a result, + when the number of eligible domains is + less than minDomains, scheduler won''t + schedule more than maxSkew Pods to those + domains. If value is nil, the constraint + behaves as if MinDomains is equal to 1. + Valid values are integers greater than + 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew + is set to 2, MinDomains is set to 5 and + pods with the same labelSelector spread + as 2/2/2: | zone1 | zone2 | zone3 | | P + P | P P | P P | The number of domains + is less than 5(MinDomains), so "global + minimum" is treated as 0. In this situation, + new pod with the same labelSelector cannot + be scheduled, because computed skew will + be 3(3 - 0) if new Pod is scheduled to + any of the three zones, it will violate + MaxSkew. + + + This is a beta field and requires the + MinDomainsInPodTopologySpread feature + gate to be enabled (enabled by default).' + format: int32 + type: integer + nodeAffinityPolicy: + description: 'NodeAffinityPolicy indicates + how we will treat Pod''s nodeAffinity/nodeSelector + when calculating pod topology spread skew. + Options are: - Honor: only nodes matching + nodeAffinity/nodeSelector are included + in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in + the calculations. + + + If this value is nil, the behavior is + equivalent to the Honor policy. This is + a beta-level feature default enabled by + the NodeInclusionPolicyInPodTopologySpread + feature flag.' 
+ type: string + nodeTaintsPolicy: + description: 'NodeTaintsPolicy indicates + how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with + tainted nodes for which the incoming pod + has a toleration, are included. - Ignore: + node taints are ignored. All nodes are + included. + + + If this value is nil, the behavior is + equivalent to the Ignore policy. This + is a beta-level feature default enabled + by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + type: string + topologyKey: + description: TopologyKey is the key of node + labels. Nodes that have a label with this + key and identical values are considered + to be in the same topology. We consider + each as a "bucket", and try + to put balanced number of pods into each + bucket. We define a domain as a particular + instance of a topology. Also, we define + an eligible domain as a domain whose nodes + meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey + is "kubernetes.io/hostname", each Node + is a domain of that topology. And, if + TopologyKey is "topology.kubernetes.io/zone", + each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: "WhenUnsatisfiable indicates\ + \ how to deal with a pod if it doesn't\ + \ satisfy the spread constraint. - DoNotSchedule\ + \ (default) tells the scheduler not to\ + \ schedule it. - ScheduleAnyway tells\ + \ the scheduler to schedule the pod in\ + \ any location,\n but giving higher precedence\ + \ to topologies that would help reduce\ + \ the\n skew.\nA constraint is considered\ + \ \"Unsatisfiable\" for an incoming pod\ + \ if and only if every possible node assignment\ + \ for that pod would violate \"MaxSkew\"\ + \ on some topology. For example, in a\ + \ 3-zone cluster, MaxSkew is set to 1,\ + \ and pods with the same labelSelector\ + \ spread as 3/1/1: | zone1 | zone2 | zone3\ + \ | | P P P | P | P | If WhenUnsatisfiable\ + \ is set to DoNotSchedule, incoming pod\ + \ can only be scheduled to zone2(zone3)\ + \ to become 3/2/1(3/1/2) as ActualSkew(2-1)\ + \ on zone2(zone3) satisfies MaxSkew(1).\ + \ In other words, the cluster can still\ + \ be imbalanced, but scheduler won't make\ + \ it *more* imbalanced. It's a required\ + \ field." + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + backup: + type: object + description: Backup Pod custom scheduling and + affinity configuration. + properties: + nodeSelector: + description: 'Node affinity is a group of + node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified by + this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by + iterating through the elements of this + field and adding "weight" to the sum + if the node matches the corresponding + matchExpressions; the node(s) with the + highest sum are the most preferred. 
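(Illustrative example, not part of the bundled manifests: a minimal topologySpreadConstraints entry for the scheduling section described above; the label used in the selector is a hypothetical one, chosen only for illustration.)

topologySpreadConstraints:
- maxSkew: 1
  topologyKey: topology.kubernetes.io/zone   # spread the cluster's Pods across zones
  whenUnsatisfiable: DoNotSchedule
  labelSelector:
    matchLabels:
      app: my-sharded-cluster                # hypothetical label selecting this cluster's Pods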
+ items: + description: An empty preferred scheduling + term matches all objects with implicit + weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches + no objects (i.e. is also a no-op). + properties: + preference: + description: A null or empty node + selector term matches no objects. + The requirements of them are ANDed. + The TopologySelectorTerm type + implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node + selector requirements by node's + labels. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by node's + fields. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents + the union of the results of one or more + label queries over a set of nodes; that + is, it represents the OR of the selectors + represented by the node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. + items: + description: A null or empty node + selector term matches no objects. + The requirements of them are ANDed. + The TopologySelectorTerm type + implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node + selector requirements by node's + labels. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. 
+ properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by node's + fields. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + tolerations: + description: 'Node affinity is a group of + node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified by + this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by + iterating through the elements of this + field and adding "weight" to the sum + if the node matches the corresponding + matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit + weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches + no objects (i.e. is also a no-op). + properties: + preference: + description: A null or empty node + selector term matches no objects. + The requirements of them are ANDed. + The TopologySelectorTerm type + implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node + selector requirements by node's + labels. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. 
+ type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by node's + fields. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents + the union of the results of one or more + label queries over a set of nodes; that + is, it represents the OR of the selectors + represented by the node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. + items: + description: A null or empty node + selector term matches no objects. + The requirements of them are ANDed. + The TopologySelectorTerm type + implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node + selector requirements by node's + labels. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by node's + fields. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + nodeAffinity: + description: 'Node affinity is a group of + node affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified by + this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by + iterating through the elements of this + field and adding "weight" to the sum + if the node matches the corresponding + matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit + weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches + no objects (i.e. is also a no-op). + properties: + preference: + description: A null or empty node + selector term matches no objects. + The requirements of them are ANDed. + The TopologySelectorTerm type + implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node + selector requirements by node's + labels. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by node's + fields. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents + the union of the results of one or more + label queries over a set of nodes; that + is, it represents the OR of the selectors + represented by the node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. + items: + description: A null or empty node + selector term matches no objects. + The requirements of them are ANDed. + The TopologySelectorTerm type + implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node + selector requirements by node's + labels. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by node's + fields. + items: + description: A node selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array + of string values. 
If + the operator is In or + NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the + values array must be + empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will + be interpreted as an + integer. This array + is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's + priority. "system-node-critical" and "system-cluster-critical" + are two special keywords which indicate + the highest priorities with the former being + the highest priority. Any other name must + be defined by creating a PriorityClass object + with that name. If not specified, the pod + priority will be default or zero if there + is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter + pod affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified by + this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling affinity + expressions, etc.), compute a sum by + iterating through the elements of this + field and adding "weight" to the sum + if the node has pods which matches the + corresponding podAffinityTerm; the node(s) + with the highest sum are the most preferred. + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector + is a label query over a set + of resources. The result of + matchLabels and matchExpressions + are ANDed. An empty label + selector matches all objects. + A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. 
This + array is replaced + during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys + is a set of pod label keys + to select which pods will + be taken into consideration. + The keys are used to lookup + values from the incoming pod + labels, those key-value labels + are merged with `LabelSelector` + as `key in (value)` to select + the group of existing pods + which pods will be taken into + consideration for the incoming + pod's pod (anti) affinity. + Keys that don't exist in the + incoming pod labels will be + ignored. The default value + is empty. The same key is + forbidden to exist in both + MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot + be set when LabelSelector + isn't set. This is an alpha + field and requires enabling + MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys + is a set of pod label keys + to select which pods will + be taken into consideration. + The keys are used to lookup + values from the incoming pod + labels, those key-value labels + are merged with `LabelSelector` + as `key notin (value)` to + select the group of existing + pods which pods will be taken + into consideration for the + incoming pod's pod (anti) + affinity. Keys that don't + exist in the incoming pod + labels will be ignored. The + default value is empty. The + same key is forbidden to exist + in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha + field and requires enabling + MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector + is a label query over a set + of resources. The result of + matchLabels and matchExpressions + are ANDed. An empty label + selector matches all objects. + A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels + is a map of {key,value} + pairs. 
A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace + names that the term applies + to. The term is applied to + the union of the namespaces + listed in this field and the + ones selected by namespaceSelector. + null or empty namespaces list + and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not met + at scheduling time, the pod will not + be scheduled onto the node. If the affinity + requirements specified by this field + cease to be met at some point during + pod execution (e.g. due to a pod label + update), the system may or may not try + to eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding to + each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value of + the label with key matches + that of any node on which a pod of + the set of pods is running + properties: + labelSelector: + description: A label selector is + a label query over a set of resources. + The result of matchLabels and + matchExpressions are ANDed. An + empty label selector matches all + objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is + an array of string values. + If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. This array is + replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a + set of pod label keys to select + which pods will be taken into + consideration. The keys are used + to lookup values from the incoming + pod labels, those key-value labels + are merged with `LabelSelector` + as `key in (value)` to select + the group of existing pods which + pods will be taken into consideration + for the incoming pod's pod (anti) + affinity. Keys that don't exist + in the incoming pod labels will + be ignored. The default value + is empty. The same key is forbidden + to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is + a set of pod label keys to select + which pods will be taken into + consideration. The keys are used + to lookup values from the incoming + pod labels, those key-value labels + are merged with `LabelSelector` + as `key notin (value)` to select + the group of existing pods which + pods will be taken into consideration + for the incoming pod's pod (anti) + affinity. Keys that don't exist + in the incoming pod labels will + be ignored. The default value + is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is + a label query over a set of resources. + The result of matchLabels and + matchExpressions are ANDed. An + empty label selector matches all + objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is + an array of string values. + If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. This array is + replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. 
+ A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The + term is applied to the union of + the namespaces listed in this + field and the ones selected by + namespaceSelector. null or empty + namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be + co-located (affinity) or not co-located + (anti-affinity) with the pods + matching the labelSelector in + the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group + of inter pod anti affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the anti-affinity expressions specified + by this field, but it may choose a node + that violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by + iterating through the elements of this + field and adding "weight" to the sum + if the node has pods which matches the + corresponding podAffinityTerm; the node(s) + with the highest sum are the most preferred. + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + properties: + podAffinityTerm: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector + is a label query over a set + of resources. The result of + matchLabels and matchExpressions + are ANDed. An empty label + selector matches all objects. + A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. 
If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys + is a set of pod label keys + to select which pods will + be taken into consideration. + The keys are used to lookup + values from the incoming pod + labels, those key-value labels + are merged with `LabelSelector` + as `key in (value)` to select + the group of existing pods + which pods will be taken into + consideration for the incoming + pod's pod (anti) affinity. + Keys that don't exist in the + incoming pod labels will be + ignored. The default value + is empty. The same key is + forbidden to exist in both + MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot + be set when LabelSelector + isn't set. This is an alpha + field and requires enabling + MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys + is a set of pod label keys + to select which pods will + be taken into consideration. + The keys are used to lookup + values from the incoming pod + labels, those key-value labels + are merged with `LabelSelector` + as `key notin (value)` to + select the group of existing + pods which pods will be taken + into consideration for the + incoming pod's pod (anti) + affinity. Keys that don't + exist in the incoming pod + labels will be ignored. The + default value is empty. The + same key is forbidden to exist + in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha + field and requires enabling + MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector + is a label query over a set + of resources. The result of + matchLabels and matchExpressions + are ANDed. An empty label + selector matches all objects. + A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace + names that the term applies + to. The term is applied to + the union of the namespaces + listed in this field and the + ones selected by namespaceSelector. + null or empty namespaces list + and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met + at scheduling time, the pod will not + be scheduled onto the node. If the anti-affinity + requirements specified by this field + cease to be met at some point during + pod execution (e.g. due to a pod label + update), the system may or may not try + to eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding to + each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value of + the label with key matches + that of any node on which a pod of + the set of pods is running + properties: + labelSelector: + description: A label selector is + a label query over a set of resources. + The result of matchLabels and + matchExpressions are ANDed. An + empty label selector matches all + objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is + an array of string values. + If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. 
This array is + replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a + set of pod label keys to select + which pods will be taken into + consideration. The keys are used + to lookup values from the incoming + pod labels, those key-value labels + are merged with `LabelSelector` + as `key in (value)` to select + the group of existing pods which + pods will be taken into consideration + for the incoming pod's pod (anti) + affinity. Keys that don't exist + in the incoming pod labels will + be ignored. The default value + is empty. The same key is forbidden + to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is + a set of pod label keys to select + which pods will be taken into + consideration. The keys are used + to lookup values from the incoming + pod labels, those key-value labels + are merged with `LabelSelector` + as `key notin (value)` to select + the group of existing pods which + pods will be taken into consideration + for the incoming pod's pod (anti) + affinity. Keys that don't exist + in the incoming pod labels will + be ignored. The default value + is empty. The same key is forbidden + to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector + isn't set. This is an alpha field + and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is + a label query over a set of resources. + The result of matchLabels and + matchExpressions are ANDed. An + empty label selector matches all + objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a + key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator + represents a key's relationship + to a set of values. + Valid operators are + In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is + an array of string values. + If the operator is In + or NotIn, the values + array must be non-empty. + If the operator is Exists + or DoesNotExist, the + values array must be + empty. This array is + replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. 
+ A single {key,value} in the + matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The + term is applied to the union of + the namespaces listed in this + field and the ones selected by + namespaceSelector. null or empty + namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be + co-located (affinity) or not co-located + (anti-affinity) with the pods + matching the labelSelector in + the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + managementPolicy: + type: string + description: "managementPolicy controls how pods are\ + \ created during initial scale up, when replacing\ + \ pods\n on nodes, or when scaling down. The default\ + \ policy is `OrderedReady`, where pods are created\n\ + \ in increasing order (pod-0, then pod-1, etc)\ + \ and the controller will wait until each pod is\n\ + \ ready before continuing. When scaling down, the\ + \ pods are removed in the opposite order.\n The\ + \ alternative policy is `Parallel` which will create\ + \ pods in parallel to match the desired\n scale\ + \ without waiting, and on scale down will delete\ + \ all pods at once.\n" + customVolumes: + type: array + description: "A list of custom volumes that may be\ + \ used along with any container defined in\n customInitContainers\ + \ or customContainers sections for the shards.\n\ + \nThe name used in this section will be prefixed\ + \ with the string `c-` so that when\n referencing\ + \ them in the customInitContainers or customContainers\ + \ sections the name used\n have to be prepended\ + \ with the same prefix.\n\nOnly the following volume\ + \ types are allowed: configMap, downwardAPI, emptyDir,\n\ + \ gitRepo, glusterfs, hostPath, nfs, projected\ + \ and secret\n\n**Changing this field may require\ + \ a restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core\n" + items: + type: object + description: "A custom volume that may be used along\ + \ with any container defined in\n customInitContainers\ + \ or customContainers sections.\n\nThe name used\ + \ in this section will be prefixed with the string\ + \ `c-` so that when\n referencing them in the\ + \ customInitContainers or customContainers sections\ + \ the name used\n have to be prepended with the\ + \ same prefix.\n\nOnly the following volume types\ + \ are allowed: configMap, downwardAPI, emptyDir,\n\ + \ gitRepo, glusterfs, hostPath, nfs, projected\ + \ and secret\n\n**Changing this field may require\ + \ a restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volume-v1-core\n" + properties: + name: + description: 'name of the custom volume. The + name will be implicitly prefixed with `c-` + to avoid clashing with internal operator volume + names. Must be a DNS_LABEL and unique within + the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + + ' + type: string + configMap: + description: 'Adapts a ConfigMap into a volume. + + + The contents of the target ConfigMap''s Data + field will be presented in a volume as files + using the keys in the Data field as the file + names, unless the items element is populated + with specific mappings of keys to paths. ConfigMap + volumes support ownership management and SELinux + relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#configmapvolumesource-v1-core' + properties: + defaultMode: + description: 'defaultMode is optional: mode + bits used to set permissions on created + files by default. Must be an octal value + between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal + and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. + Directories within the path are not affected + by this setting. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each + key-value pair in the Data field of the + referenced ConfigMap will be projected + into the volume as a file whose name is + the key and content is the value. If specified, + the listed keys will be projected into + the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the ConfigMap, + the volume setup will error unless it + is marked optional. Paths must be relative + and may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on + this file. Must be an octal value + between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts + both octal and decimal values, JSON + requires decimal values for mode + bits. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options + that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: 'DownwardAPIVolumeSource represents + a volume containing downward API info. Downward + API volumes support ownership management and + SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#downwardapivolumesource-v1-core' + properties: + defaultMode: + description: 'Optional: mode bits to use + on created files by default. Must be a + Optional: mode bits used to set permissions + on created files by default. Must be an + octal value between 0000 and 0777 or a + decimal value between 0 and 511. 
YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. Defaults to 0644. Directories within + the path are not affected by this setting. + This might be in conflict with other options + that affect the file mode, like fsGroup, + and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: Items is a list of downward + API volume file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in + terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified API + version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits + used to set permissions on this + file, must be an octal value between + 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts + both octal and decimal values, JSON + requires decimal values for mode + bits. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options + that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: ResourceFieldSelector + represents container resources (cpu, + memory) and their output format + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number.\ + \ It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition\ + \ to String() and AsInt64()\ + \ accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\ + \n\t(Note that may\ + \ be empty, from the \"\" case\ + \ in .)\n\n\ + \ ::= 0 | 1 | ...\ + \ | 9 ::=\ + \ | \ + \ ::= \ + \ | . | .\ + \ | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki |\ + \ Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units;\ + \ See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m |\ + \ \"\" | k | M | G | T | P |\ + \ E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose\ + \ the capitalization.)\n\n\ + \ ::= \"e\" |\ + \ \"E\" ```\n\ + \nNo matter which of the three\ + \ exponent forms is used, no\ + \ quantity may represent a number\ + \ greater than 2^63-1 in magnitude,\ + \ nor may it have more than\ + \ 3 decimal places. Numbers\ + \ larger or more precise will\ + \ be capped or rounded up. (E.g.:\ + \ 0.1m will rounded up to 1m.)\ + \ This may be extended in the\ + \ future if we require larger\ + \ or smaller quantities.\n\n\ + When a Quantity is parsed from\ + \ a string, it will remember\ + \ the type of suffix it had,\ + \ and will use the same type\ + \ again when it is serialized.\n\ + \nBefore serializing, Quantity\ + \ will be put in \"canonical\ + \ form\". 
This means that Exponent/suffix\ + \ will be adjusted up or down\ + \ (with a corresponding increase\ + \ or decrease in Mantissa) such\ + \ that:\n\n- No precision is\ + \ lost - No fractional digits\ + \ will be emitted - The exponent\ + \ (or suffix) is as large as\ + \ possible.\n\nThe sign will\ + \ be omitted unless the number\ + \ is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be\ + \ serialized as \"1536Mi\"\n\ + \nNote that the quantity will\ + \ NEVER be internally represented\ + \ by a floating point number.\ + \ That is the whole point of\ + \ this exercise.\n\nNon-canonical\ + \ values will still parse as\ + \ long as they are well formed,\ + \ but will be re-emitted in\ + \ their canonical form. (So\ + \ always use canonical form,\ + \ or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without\ + \ writing some sort of special\ + \ handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point\ + \ implementation." + type: string + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'Represents an empty directory + for a pod. Empty directory volumes support + ownership management and SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#emptydirvolumesource-v1-core' + properties: + medium: + description: 'medium represents what type + of storage medium should back this directory. + The default is "" which means to use the + node''s default medium. Must be an empty + string (default) or Memory. More info: + https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling in\ + \ JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\n\t(Note\ + \ that may be empty, from the\ + \ \"\" case in .)\n\n\ + \ ::= 0 | 1 | ... | 9 \ + \ ::= | \ + \ ::= | .\ + \ | . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= | \ + \ | ::=\ + \ Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International\ + \ System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k |\ + \ M | G | T | P | E\n\n\t(Note that 1024\ + \ = 1Ki but 1000 = 1k; I didn't choose\ + \ the capitalization.)\n\n\ + \ ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three\ + \ exponent forms is used, no quantity\ + \ may represent a number greater than\ + \ 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers\ + \ larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in the\ + \ future if we require larger or smaller\ + \ quantities.\n\nWhen a Quantity is parsed\ + \ from a string, it will remember the\ + \ type of suffix it had, and will use\ + \ the same type again when it is serialized.\n\ + \nBefore serializing, Quantity will be\ + \ put in \"canonical form\". 
This means\ + \ that Exponent/suffix will be adjusted\ + \ up or down (with a corresponding increase\ + \ or decrease in Mantissa) such that:\n\ + \n- No precision is lost - No fractional\ + \ digits will be emitted - The exponent\ + \ (or suffix) is as large as possible.\n\ + \nThe sign will be omitted unless the\ + \ number is negative.\n\nExamples:\n\n\ + - 1.5 will be serialized as \"1500m\"\ + \ - 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER\ + \ be internally represented by a floating\ + \ point number. That is the whole point\ + \ of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are\ + \ well formed, but will be re-emitted\ + \ in their canonical form. (So always\ + \ use canonical form, or don't diff.)\n\ + \nThis format is intended to make it difficult\ + \ to use these numbers without writing\ + \ some sort of special handling code in\ + \ the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + type: object + gitRepo: + description: 'Represents a volume that is populated + with the contents of a git repository. Git + repo volumes do not support ownership management. + Git repo volumes support SELinux relabeling. + + + DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir + into an InitContainer that clones the repo + using git, then mount the EmptyDir into the + Pod''s container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#gitrepovolumesource-v1-core' + properties: + directory: + description: directory is the target directory + name. Must not contain or start with '..'. If + '.' is supplied, the volume directory + will be the git repository. Otherwise, + if specified, the volume will contain + the git repository in the subdirectory + with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash + for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Represents a Glusterfs mount that + lasts the lifetime of a pod. Glusterfs volumes + do not support ownership management or SELinux + relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#glusterfsvolumesource-v1-core' + properties: + endpoints: + description: 'endpoints is the endpoint + name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume + path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the + Glusterfs volume to be mounted with read-only + permissions. Defaults to false. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'Represents a host path mapped + into a pod. Host path volumes do not support + ownership management or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#hostpathvolumesource-v1-core' + properties: + path: + description: 'path of the directory on the + host. If the path is a symlink, it will + follow the link to the real path. 
More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults + to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + nfs: + description: 'Represents an NFS mount that lasts + the lifetime of a pod. NFS volumes do not + support ownership management or SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nfsvolumesource-v1-core' + properties: + path: + description: 'path that is exported by the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the + NFS export to be mounted with read-only + permissions. Defaults to false. More info: + https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or + IP address of the NFS server. More info: + https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - server + - path + type: object + projected: + description: 'Represents a projected volume + source + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#projectedvolumesource-v1-core' + properties: + defaultMode: + description: defaultMode are the mode bits + used to set permissions on created files + by default. Must be an octal value between + 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal + values for mode bits. Directories within + the path are not affected by this setting. + This might be in conflict with other options + that affect the file mode, like fsGroup, + and the result can be other mode bits + set. + format: int32 + type: integer + sources: + description: sources is the list of volume + projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundleProjection + describes how to select a set of + ClusterTrustBundle objects and project + their contents into the pod filesystem. + properties: + labelSelector: + description: A label selector + is a label query over a set + of resources. The result of + matchLabels and matchExpressions + are ANDed. An empty label selector + matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key and + values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to a + set of values. Valid + operators are In, + NotIn, Exists and + DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, the + values array must + be non-empty. If the + operator is Exists + or DoesNotExist, the + values array must + be empty. This array + is replaced during + a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in + the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: If true, don't block + pod startup if the referenced + ClusterTrustBundle(s) aren't + available. If using name, then + the named ClusterTrustBundle + is allowed not to exist. If + using signerName, then the combination + of signerName and labelSelector + is allowed to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from + the volume root to write the + bundle. + type: string + signerName: + description: Select all ClusterTrustBundles + that match this signer name. + Mutually-exclusive with name. The + contents of all selected ClusterTrustBundles + will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: 'Adapts a ConfigMap into + a projected volume. + + + The contents of the target ConfigMap''s + Data field will be presented in + a projected volume as files using + the keys in the Data field as the + file names, unless the items element + is populated with specific mappings + of keys to paths. Note that this + is identical to a configmap volume + source without the default mode.' + properties: + items: + description: items if unspecified, + each key-value pair in the Data + field of the referenced ConfigMap + will be projected into the volume + as a file whose name is the + key and content is the value. + If specified, the listed keys + will be projected into the specified + paths, and unlisted keys will + not be present. If a key is + specified which is not present + in the ConfigMap, the volume + setup will error unless it is + marked optional. Paths must + be relative and may not contain + the '..' path or start with + '..'. + items: + description: Maps a string key + to a path within a volume. + properties: + key: + description: key is the + key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set + permissions on this file. + Must be an octal value + between 0000 and 0777 + or a decimal value between + 0 and 511. YAML accepts + both octal and decimal + values, JSON requires + decimal values for mode + bits. If not specified, + the volume defaultMode + will be used. This might + be in conflict with other + options that affect the + file mode, like fsGroup, + and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: path is the + relative path of the file + to map the key to. May + not be an absolute path. + May not contain the path + element '..'. May not + start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional specify + whether the ConfigMap or its + keys must be defined + type: boolean + type: object + downwardAPI: + description: Represents downward API + info for projecting into a projected + volume. Note that this is identical + to a downwardAPI volume source without + the default mode. + properties: + items: + description: Items is a list of + DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile + represents information to + create the file containing + the pod field + properties: + fieldRef: + description: ObjectFieldSelector + selects an APIVersioned + field of an object. + properties: + apiVersion: + description: Version + of the schema the + FieldPath is written + in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of + the field to select + in the specified API + version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: + mode bits used to set + permissions on this file, + must be an octal value + between 0000 and 0777 + or a decimal value between + 0 and 511. YAML accepts + both octal and decimal + values, JSON requires + decimal values for mode + bits. If not specified, + the volume defaultMode + will be used. This might + be in conflict with other + options that affect the + file mode, like fsGroup, + and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: + Path is the relative + path name of the file + to be created. Must not + be absolute or contain + the ''..'' path. Must + be utf-8 encoded. The + first item of the relative + path must not start with + ''..''' + type: string + resourceFieldRef: + description: ResourceFieldSelector + represents container resources + (cpu, memory) and their + output format + properties: + containerName: + description: 'Container + name: required for + volumes, optional + for env vars' + type: string + divisor: + description: "Quantity\ + \ is a fixed-point\ + \ representation of\ + \ a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML,\ + \ in addition to String()\ + \ and AsInt64() accessors.\n\ + \nThe serialization\ + \ format is:\n\n```\ + \ \ + \ ::= \n\ + \n\t(Note that \ + \ may be empty, from\ + \ the \"\" case in\ + \ .)\n\n\ + \ + \ ::= 0 | 1 | ...\ + \ | 9 \ + \ ::= \ + \ | \ + \ \ + \ ::= |\ + \ .\ + \ | . | .\ + \ \ + \ ::= \"+\" | \"\ + -\" \ + \ ::= \ + \ | \ + \ \ + \ ::= \ + \ | \ + \ | \ + \ ::= Ki |\ + \ Mi | Gi | Ti | Pi\ + \ | Ei\n\n\t(International\ + \ System of units;\ + \ See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n \ + \ ::= m | \"\" |\ + \ k | M | G | T |\ + \ P | E\n\n\t(Note\ + \ that 1024 = 1Ki\ + \ but 1000 = 1k; I\ + \ didn't choose the\ + \ capitalization.)\n\ + \n\ + \ ::= \"e\" \ + \ | \"E\" \ + \ ```\n\nNo matter\ + \ which of the three\ + \ exponent forms is\ + \ used, no quantity\ + \ may represent a\ + \ number greater than\ + \ 2^63-1 in magnitude,\ + \ nor may it have\ + \ more than 3 decimal\ + \ places. Numbers\ + \ larger or more precise\ + \ will be capped or\ + \ rounded up. (E.g.:\ + \ 0.1m will rounded\ + \ up to 1m.) 
This\ + \ may be extended\ + \ in the future if\ + \ we require larger\ + \ or smaller quantities.\n\ + \nWhen a Quantity\ + \ is parsed from a\ + \ string, it will\ + \ remember the type\ + \ of suffix it had,\ + \ and will use the\ + \ same type again\ + \ when it is serialized.\n\ + \nBefore serializing,\ + \ Quantity will be\ + \ put in \"canonical\ + \ form\". This means\ + \ that Exponent/suffix\ + \ will be adjusted\ + \ up or down (with\ + \ a corresponding\ + \ increase or decrease\ + \ in Mantissa) such\ + \ that:\n\n- No precision\ + \ is lost - No fractional\ + \ digits will be emitted\ + \ - The exponent (or\ + \ suffix) is as large\ + \ as possible.\n\n\ + The sign will be omitted\ + \ unless the number\ + \ is negative.\n\n\ + Examples:\n\n- 1.5\ + \ will be serialized\ + \ as \"1500m\" - 1.5Gi\ + \ will be serialized\ + \ as \"1536Mi\"\n\n\ + Note that the quantity\ + \ will NEVER be internally\ + \ represented by a\ + \ floating point number.\ + \ That is the whole\ + \ point of this exercise.\n\ + \nNon-canonical values\ + \ will still parse\ + \ as long as they\ + \ are well formed,\ + \ but will be re-emitted\ + \ in their canonical\ + \ form. (So always\ + \ use canonical form,\ + \ or don't diff.)\n\ + \nThis format is intended\ + \ to make it difficult\ + \ to use these numbers\ + \ without writing\ + \ some sort of special\ + \ handling code in\ + \ the hopes that that\ + \ will cause implementors\ + \ to also use a fixed\ + \ point implementation." + type: string + resource: + description: 'Required: + resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: 'Adapts a secret into + a projected volume. + + + The contents of the target Secret''s + Data field will be presented in + a projected volume as files using + the keys in the Data field as the + file names. Note that this is identical + to a secret volume source without + the default mode.' + properties: + items: + description: items if unspecified, + each key-value pair in the Data + field of the referenced Secret + will be projected into the volume + as a file whose name is the + key and content is the value. + If specified, the listed keys + will be projected into the specified + paths, and unlisted keys will + not be present. If a key is + specified which is not present + in the Secret, the volume setup + will error unless it is marked + optional. Paths must be relative + and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key + to a path within a volume. + properties: + key: + description: key is the + key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set + permissions on this file. + Must be an octal value + between 0000 and 0777 + or a decimal value between + 0 and 511. YAML accepts + both octal and decimal + values, JSON requires + decimal values for mode + bits. If not specified, + the volume defaultMode + will be used. This might + be in conflict with other + options that affect the + file mode, like fsGroup, + and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: path is the + relative path of the file + to map the key to. May + not be an absolute path. + May not contain the path + element '..'. May not + start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: optional field specify + whether the Secret or its key + must be defined + type: boolean + type: object + serviceAccountToken: + description: ServiceAccountTokenProjection + represents a projected service account + token volume. This projection can + be used to insert a service account + token into the pods runtime filesystem + for use against APIs (Kubernetes + API Server or otherwise). + properties: + audience: + description: audience is the intended + audience of the token. A recipient + of a token must identify itself + with an identifier specified + in the audience of the token, + and otherwise should reject + the token. The audience defaults + to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds + is the requested duration of + validity of the service account + token. As the token approaches + expiration, the kubelet volume + plugin will proactively rotate + the service account token. The + kubelet will start trying to + rotate the token if the token + is older than 80 percent of + its time to live or if the token + is older than 24 hours.Defaults + to 1 hour and must be at least + 10 minutes. + format: int64 + type: integer + path: + description: path is the path + relative to the mount point + of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + secret: + description: 'Adapts a Secret into a volume. + + + The contents of the target Secret''s Data + field will be presented in a volume as files + using the keys in the Data field as the file + names. Secret volumes support ownership management + and SELinux relabeling. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretvolumesource-v1-core' + properties: + defaultMode: + description: 'defaultMode is Optional: mode + bits used to set permissions on created + files by default. Must be an octal value + between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal + and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. + Directories within the path are not affected + by this setting. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each + key-value pair in the Data field of the + referenced Secret will be projected into + the volume as a file whose name is the + key and content is the value. If specified, + the listed keys will be projected into + the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the Secret, the + volume setup will error unless it is marked + optional. Paths must be relative and may + not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on + this file. Must be an octal value + between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts + both octal and decimal values, JSON + requires decimal values for mode + bits. If not specified, the volume + defaultMode will be used. 
This might + be in conflict with other options + that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether + the Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of + the secret in the pod''s namespace to + use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource + references the user''s PVC in the same namespace. + This volume finds the bound PV and mounts + that volume for the pod. A PersistentVolumeClaimVolumeSource + is, essentially, a wrapper around another + type of volume that is owned by someone else + (the system). + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#persistentvolumeclaimvolumesource-v1-core' + properties: + claimName: + description: 'claimName is the name of a + PersistentVolumeClaim in the same namespace + as the pod using this volume. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly + setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + customInitContainers: + type: array + description: "A list of custom application init containers\ + \ that run within the coordinator cluster's Pods.\ + \ The\n custom init containers will run following\ + \ the defined sequence as the end of\n cluster's\ + \ Pods init containers.\n\nThe name used in this\ + \ section will be prefixed with the string `c-`\ + \ so that when\n referencing them in the .spec.containers\ + \ section of SGInstanceProfile the name used\n \ + \ have to be prepended with the same prefix.\n\n\ + **Changing this field may require a restart.**\n\ + \nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\n" + items: + type: object + description: "A custom application init container\ + \ that run within the cluster's Pods. The custom\ + \ init\n containers will run following the defined\ + \ sequence as the end of cluster's Pods init\n\ + \ containers.\n\nThe name used in this section\ + \ will be prefixed with the string `c-` so that\ + \ when\n referencing them in the .spec.containers\ + \ section of SGInstanceProfile the name used\n\ + \ have to be prepended with the same prefix.\n\ + \nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\\\ + n\n\n**Changing this field may require a restart.**\n" + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. The + container image''s CMD is used if this is + not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. + If a variable cannot be resolved, the reference + in the input string will be unchanged. Double + $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal + "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable + exists or not. 
Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed + within a shell. The container image''s ENTRYPOINT + is used if this is not provided. Variable + references $(VAR_NAME) are expanded using + the container''s environment. If a variable + cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to + set in the container. Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined + environment variables in the container + and any service environment variables. + If a variable cannot be resolved, the + reference in the input string will be + unchanged. Double $$ are reduced to + a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: EnvVarSource represents a + source for the value of an EnvVar. + properties: + configMapKeyRef: + description: Selects a key from a + ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the + ConfigMap or its key must be + defined + type: boolean + required: + - key + type: object + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in + terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified API + version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: ResourceFieldSelector + represents container resources (cpu, + memory) and their output format + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number.\ + \ It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition\ + \ to String() and AsInt64()\ + \ accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\ + \n\t(Note that may\ + \ be empty, from the \"\" case\ + \ in .)\n\n\ + \ ::= 0 | 1 | ...\ + \ | 9 ::=\ + \ | \ + \ ::= \ + \ | . | .\ + \ | . 
\ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki |\ + \ Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units;\ + \ See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m |\ + \ \"\" | k | M | G | T | P |\ + \ E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose\ + \ the capitalization.)\n\n\ + \ ::= \"e\" |\ + \ \"E\" ```\n\ + \nNo matter which of the three\ + \ exponent forms is used, no\ + \ quantity may represent a number\ + \ greater than 2^63-1 in magnitude,\ + \ nor may it have more than\ + \ 3 decimal places. Numbers\ + \ larger or more precise will\ + \ be capped or rounded up. (E.g.:\ + \ 0.1m will rounded up to 1m.)\ + \ This may be extended in the\ + \ future if we require larger\ + \ or smaller quantities.\n\n\ + When a Quantity is parsed from\ + \ a string, it will remember\ + \ the type of suffix it had,\ + \ and will use the same type\ + \ again when it is serialized.\n\ + \nBefore serializing, Quantity\ + \ will be put in \"canonical\ + \ form\". This means that Exponent/suffix\ + \ will be adjusted up or down\ + \ (with a corresponding increase\ + \ or decrease in Mantissa) such\ + \ that:\n\n- No precision is\ + \ lost - No fractional digits\ + \ will be emitted - The exponent\ + \ (or suffix) is as large as\ + \ possible.\n\nThe sign will\ + \ be omitted unless the number\ + \ is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be\ + \ serialized as \"1536Mi\"\n\ + \nNote that the quantity will\ + \ NEVER be internally represented\ + \ by a floating point number.\ + \ That is the whole point of\ + \ this exercise.\n\nNon-canonical\ + \ values will still parse as\ + \ long as they are well formed,\ + \ but will be re-emitted in\ + \ their canonical form. (So\ + \ always use canonical form,\ + \ or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without\ + \ writing some sort of special\ + \ handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point\ + \ implementation." + type: string + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: SecretKeySelector selects + a key of a Secret. + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the + Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined + within a source must be a C_IDENTIFIER. All + invalid keys will be reported as an event + when the container is starting. When a key + exists in multiple sources, the value associated + with the last source will take precedence. + Values defined by an Env with a duplicate + key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the + source of a set of ConfigMaps + properties: + configMapRef: + description: 'ConfigMapEnvSource selects + a ConfigMap to populate the environment + variables with. + + + The contents of the target ConfigMap''s + Data field will represent the key-value + pairs as environment variables.' 
+ properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + prefix: + description: An optional identifier to + prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + type: string + secretRef: + description: 'SecretEnvSource selects + a Secret to populate the environment + variables with. + + + The contents of the target Secret''s + Data field will represent the key-value + pairs as environment variables.' + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: + https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level + config management to default or override container + images in workload controllers like Deployments + and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, + Never, IfNotPresent. Defaults to Always if + :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle describes actions that + the management system should take in response + to container lifecycle events. For the PostStart + and PreStop lifecycle handlers, management + of the container blocks until the action is + complete, unless the container process fails, + in which case the handler is aborted. + properties: + postStart: + description: LifecycleHandler defines a + specific action that should be taken in + a lifecycle hook. One and only one of + the fields, except TCPSocket must be specified. + properties: + exec: + description: ExecAction describes a + "run in container" action. + properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the + command is root ('/') in the + container's filesystem. The command + is simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, you + need to explicitly call out to + that shell. Exit status of 0 is + treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes + an action based on HTTP Get requests. + properties: + host: + description: Host name to connect + to, defaults to the pod IP. You + probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood + as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. 
+ type: string + port: + description: IntOrString is a type + that can hold an int32 or a string. When + used in JSON or YAML marshalling + and unmarshalling, it produces + or consumes the inner type. This + allows you to have, for example, + a JSON field that can accept a + name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a + "sleep" action. + properties: + seconds: + description: Seconds is the number + of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes + an action based on opening a socket + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + description: IntOrString is a type + that can hold an int32 or a string. When + used in JSON or YAML marshalling + and unmarshalling, it produces + or consumes the inner type. This + allows you to have, for example, + a JSON field that can accept a + name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: LifecycleHandler defines a + specific action that should be taken in + a lifecycle hook. One and only one of + the fields, except TCPSocket must be specified. + properties: + exec: + description: ExecAction describes a + "run in container" action. + properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the + command is root ('/') in the + container's filesystem. The command + is simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, you + need to explicitly call out to + that shell. Exit status of 0 is + treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes + an action based on HTTP Get requests. + properties: + host: + description: Host name to connect + to, defaults to the pod IP. You + probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood + as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + description: IntOrString is a type + that can hold an int32 or a string. When + used in JSON or YAML marshalling + and unmarshalling, it produces + or consumes the inner type. This + allows you to have, for example, + a JSON field that can accept a + name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a + "sleep" action. + properties: + seconds: + description: Seconds is the number + of seconds to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes + an action based on opening a socket + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + description: IntOrString is a type + that can hold an int32 or a string. When + used in JSON or YAML marshalling + and unmarshalling, it produces + or consumes the inner type. This + allows you to have, for example, + a JSON field that can accept a + name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probe describes a health check + to be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run + in container" action. + properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to + explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed + after having succeeded. Defaults to 3. + Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of + the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default + behavior is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an + action based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood as + the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness + probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum + value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully + upon probe failure. The grace period is + the duration in seconds after the processes + running in the pod are sent a termination + signal and the time when the processes + are forcibly halted with a kill signal. + Set this value longer than the expected + cleanup time for your process. If this + value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified + as a DNS_LABEL. Each container in a pod must + have a unique name (DNS_LABEL). Cannot be + updated. + type: string + ports: + description: List of ports to expose from the + container. Not specifying a port here DOES + NOT prevent that port from being exposed. + Any port which is listening on the default + "0.0.0.0" address inside a container will + be accessible from the network. Modifying + this array with strategic merge patch may + corrupt the data. For more information See + https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: Number of port to expose + on the pod's IP address. This must be + a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the + external port to. + type: string + hostPort: + description: Number of port to expose + on the host. If specified, this must + be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must + match ContainerPort. Most containers + do not need this. 
+ format: int32 + type: integer + name: + description: If specified, this must be + an IANA_SVC_NAME and unique within the + pod. Each named port in a pod must have + a unique name. Name for the port that + can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be + UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: Probe describes a health check + to be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run + in container" action. + properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to + explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed + after having succeeded. Defaults to 3. + Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of + the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default + behavior is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an + action based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood as + the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness + probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum + value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully + upon probe failure. The grace period is + the duration in seconds after the processes + running in the pod are sent a termination + signal and the time when the processes + are forcibly halted with a kill signal. + Set this value longer than the expected + cleanup time for your process. If this + value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the + container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to + which this resource resize policy applies. + Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when + specified resource is resized. If not + specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + resources: + description: ResourceRequirements describes + the compute resource requirements. + properties: + claims: + description: 'Claims lists the names of + resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling + the DynamicResourceAllocation feature + gate. + + + This field is immutable. It can only be + set for containers.' + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point\ + \ representation of a number. 
It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\n\t\ + (Note that may be empty, from\ + \ the \"\" case in .)\n\n\ + ::= 0 | 1 | ... |\ + \ 9 ::= |\ + \ \ + \ ::= | . |\ + \ . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= | \ + \ | ::=\ + \ Ki | Mi | Gi | Ti | Pi | Ei\n\n\t\ + (International System of units; See:\ + \ http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k\ + \ | M | G | T | P | E\n\n\t(Note that\ + \ 1024 = 1Ki but 1000 = 1k; I didn't\ + \ choose the capitalization.)\n\n\ + \ ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three\ + \ exponent forms is used, no quantity\ + \ may represent a number greater than\ + \ 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers\ + \ larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in\ + \ the future if we require larger or\ + \ smaller quantities.\n\nWhen a Quantity\ + \ is parsed from a string, it will remember\ + \ the type of suffix it had, and will\ + \ use the same type again when it is\ + \ serialized.\n\nBefore serializing,\ + \ Quantity will be put in \"canonical\ + \ form\". This means that Exponent/suffix\ + \ will be adjusted up or down (with\ + \ a corresponding increase or decrease\ + \ in Mantissa) such that:\n\n- No precision\ + \ is lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign\ + \ will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5\ + \ will be serialized as \"1500m\" -\ + \ 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER\ + \ be internally represented by a floating\ + \ point number. That is the whole point\ + \ of this exercise.\n\nNon-canonical\ + \ values will still parse as long as\ + \ they are well formed, but will be\ + \ re-emitted in their canonical form.\ + \ (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these\ + \ numbers without writing some sort\ + \ of special handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum + amount of compute resources allowed. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\n\t\ + (Note that may be empty, from\ + \ the \"\" case in .)\n\n\ + ::= 0 | 1 | ... |\ + \ 9 ::= |\ + \ \ + \ ::= | . |\ + \ . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= | \ + \ | ::=\ + \ Ki | Mi | Gi | Ti | Pi | Ei\n\n\t\ + (International System of units; See:\ + \ http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k\ + \ | M | G | T | P | E\n\n\t(Note that\ + \ 1024 = 1Ki but 1000 = 1k; I didn't\ + \ choose the capitalization.)\n\n\ + \ ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three\ + \ exponent forms is used, no quantity\ + \ may represent a number greater than\ + \ 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. 
Numbers\ + \ larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in\ + \ the future if we require larger or\ + \ smaller quantities.\n\nWhen a Quantity\ + \ is parsed from a string, it will remember\ + \ the type of suffix it had, and will\ + \ use the same type again when it is\ + \ serialized.\n\nBefore serializing,\ + \ Quantity will be put in \"canonical\ + \ form\". This means that Exponent/suffix\ + \ will be adjusted up or down (with\ + \ a corresponding increase or decrease\ + \ in Mantissa) such that:\n\n- No precision\ + \ is lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign\ + \ will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5\ + \ will be serialized as \"1500m\" -\ + \ 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER\ + \ be internally represented by a floating\ + \ point number. That is the whole point\ + \ of this exercise.\n\nNon-canonical\ + \ values will still parse as long as\ + \ they are well formed, but will be\ + \ re-emitted in their canonical form.\ + \ (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these\ + \ numbers without writing some sort\ + \ of special handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum + amount of compute resources required. + If Requests is omitted for a container, + it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart + behavior of individual containers in a pod. + This field may only be set for init containers, + and the only allowed value is "Always". For + non-init containers or when this field is + not specified, the restart behavior is defined + by the Pod''s restart policy and the container + type. Setting the RestartPolicy as "Always" + for the init container will have the following + effect: this init container will be continually + restarted on exit until all regular containers + have terminated. Once all regular containers + have completed, all init containers with restartPolicy + "Always" will be shut down. This lifecycle + differs from normal init containers and is + often referred to as a "sidecar" container. + Although this init container still starts + in the init container sequence, it does not + wait for the container to complete before + proceeding to the next init container. Instead, + the next init container starts immediately + after this init container is started, or after + any startupProbe has successfully completed.' + type: string + securityContext: + description: SecurityContext holds security + configuration that will be applied to a container. + Some fields are present in both SecurityContext + and PodSecurityContext. When both are set, + the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges + than its parent process. This bool directly + controls if the no_new_privs flag will + be set on the container process. 
AllowPrivilegeEscalation + is true always when the container is: + 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when + spec.os.name is windows.' + type: boolean + capabilities: + description: Adds and removes POSIX capabilities + from running containers. + properties: + add: + description: Added capabilities + items: + type: string + type: array + drop: + description: Removed capabilities + items: + type: string + type: array + type: object + privileged: + description: Run container in privileged + mode. Processes in privileged containers + are essentially equivalent to root on + the host. Defaults to false. Note that + this field cannot be set when spec.os.name + is windows. + type: boolean + procMount: + description: procMount denotes the type + of proc mount to use for the containers. + The default is DefaultProcMount which + uses the container runtime defaults for + readonly paths and masked paths. This + requires the ProcMountType feature flag + to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has + a read-only root filesystem. Default is + false. Note that this field cannot be + set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint + of the container process. Uses runtime + default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. Note that this field + cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container + must run as a non-root user. If true, + the Kubelet will validate the image at + runtime to ensure that it does not run + as UID 0 (root) and fail to start the + container if it does. If unset or false, + no such validation will be performed. + May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint + of the container process. Defaults to + user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. Note that this field + cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: SELinuxOptions are the labels + to be applied to the container + properties: + level: + description: Level is SELinux level + label that applies to the container. + type: string + role: + description: Role is a SELinux role + label that applies to the container. + type: string + type: + description: Type is a SELinux type + label that applies to the container. + type: string + user: + description: User is a SELinux user + label that applies to the container. + type: string + type: object + seccompProfile: + description: SeccompProfile defines a pod/container's + seccomp profile settings. Only one profile + source may be set. + properties: + localhostProfile: + description: localhostProfile indicates + a profile defined in a file on the + node should be used. The profile must + be preconfigured on the node to work. + Must be a descending path, relative + to the kubelet's configured seccomp + profile location. Must be set if type + is "Localhost". 
Must NOT be set for + any other type. + type: string + type: + description: 'type indicates which kind + of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a + file on the node should be used. RuntimeDefault + - the container runtime default profile + should be used. Unconfined - no profile + should be applied.' + type: string + required: + - type + type: object + windowsOptions: + description: WindowsSecurityContextOptions + contain Windows-specific options and credentials. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where + the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. + type: string + hostProcess: + description: HostProcess determines + if a container should be run as a + 'Host Process' container. All of a + Pod's containers must have the same + effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true + then HostNetwork must also be set + to true. + type: boolean + runAsUserName: + description: The UserName in Windows + to run the entrypoint of the container + process. Defaults to the user specified + in image metadata if unspecified. + May also be set in PodSecurityContext. + If set in both SecurityContext and + PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probe describes a health check + to be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run + in container" action. + properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to + explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed + after having succeeded. Defaults to 3. + Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of + the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default + behavior is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an + action based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. 
+ items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood as + the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness + probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum + value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully + upon probe failure. The grace period is + the duration in seconds after the processes + running in the pod are sent a termination + signal and the time when the processes + are forcibly halted with a kill signal. + Set this value longer than the expected + cleanup time for your process. If this + value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. + If this is not set, reads from stdin in the + container will always result in EOF. Default + is false. 
+ type: boolean + stdinOnce: + description: Whether the container runtime should + close the stdin channel after it has been + opened by a single attach. When stdin is true + the stdin stream will remain open across multiple + attach sessions. If stdinOnce is set to true, + stdin is opened on container start, is empty + until the first client attaches to stdin, + and then remains open and accepts data until + the client disconnects, at which time stdin + is closed and remains closed until the container + is restarted. If this flag is false, a container + processes that reads from stdin will never + receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file + to which the container''s termination message + will be written is mounted into the container''s + filesystem. Message written is intended to + be brief final status, such as an assertion + failure message. Will be truncated by the + node if greater than 4096 bytes. The total + message length across all containers will + be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message + should be populated. File will use the contents + of terminationMessagePath to populate the + container status message on both success and + failure. FallbackToLogsOnError will use the + last chunk of container log output if the + termination message file is empty and the + container exited with an error. The log output + is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to + be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block + devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside + of the container that the device will + be mapped to. + type: string + name: + description: name must match the name + of a persistentVolumeClaim in the pod + type: string + required: + - name + - devicePath + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: Path within the container + at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines + how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is + used. This field is beta in 1.10. + type: string + name: + description: This must match the Name + of a Volume. + type: string + readOnly: + description: Mounted read-only if true, + read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: Path within the volume from + which the container's volume should + be mounted. Defaults to "" (volume's + root). + type: string + subPathExpr: + description: Expanded path within the + volume from which the container's volume + should be mounted. Behaves similarly + to SubPath but environment variable + references $(VAR_NAME) are expanded + using the container's environment. 
Defaults + to "" (volume's root). SubPathExpr and + SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + type: array + workingDir: + description: Container's working directory. + If not specified, the container runtime's + default will be used, which might be configured + in the container image. Cannot be updated. + type: string + customContainers: + type: array + description: "A list of custom application containers\ + \ that run within the shards cluster's Pods.\n\n\ + The name used in this section will be prefixed with\ + \ the string `c-` so that when\n referencing them\ + \ in the .spec.containers section of SGInstanceProfile\ + \ the name used\n have to be prepended with the\ + \ same prefix.\n\n**Changing this field may require\ + \ a restart.**\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\n" + items: + type: object + description: "A custom application container that\ + \ run within the cluster's Pods. The custom\n\ + \ containers will run following the defined sequence\ + \ as the end of cluster's Pods\n containers.\n\ + \nThe name used in this section will be prefixed\ + \ with the string `c-` so that when\n referencing\ + \ them in the .spec.containers section of SGInstanceProfile\ + \ the name used\n have to be prepended with the\ + \ same prefix.\n\nSee: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#container-v1-core\\\ + n\n\n**Changing this field may require a restart.**\n" + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. The + container image''s CMD is used if this is + not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. + If a variable cannot be resolved, the reference + in the input string will be unchanged. Double + $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal + "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed + within a shell. The container image''s ENTRYPOINT + is used if this is not provided. Variable + references $(VAR_NAME) are expanded using + the container''s environment. If a variable + cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to + set in the container. Cannot be updated. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. 
+ type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined + environment variables in the container + and any service environment variables. + If a variable cannot be resolved, the + reference in the input string will be + unchanged. Double $$ are reduced to + a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, + regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: EnvVarSource represents a + source for the value of an EnvVar. + properties: + configMapKeyRef: + description: Selects a key from a + ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the + ConfigMap or its key must be + defined + type: boolean + required: + - key + type: object + fieldRef: + description: ObjectFieldSelector selects + an APIVersioned field of an object. + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in + terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified API + version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: ResourceFieldSelector + represents container resources (cpu, + memory) and their output format + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + description: "Quantity is a fixed-point\ + \ representation of a number.\ + \ It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition\ + \ to String() and AsInt64()\ + \ accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\ + \n\t(Note that may\ + \ be empty, from the \"\" case\ + \ in .)\n\n\ + \ ::= 0 | 1 | ...\ + \ | 9 ::=\ + \ | \ + \ ::= \ + \ | . | .\ + \ | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= \ + \ | | \ + \ ::= Ki |\ + \ Mi | Gi | Ti | Pi | Ei\n\n\ + \t(International System of units;\ + \ See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m |\ + \ \"\" | k | M | G | T | P |\ + \ E\n\n\t(Note that 1024 = 1Ki\ + \ but 1000 = 1k; I didn't choose\ + \ the capitalization.)\n\n\ + \ ::= \"e\" |\ + \ \"E\" ```\n\ + \nNo matter which of the three\ + \ exponent forms is used, no\ + \ quantity may represent a number\ + \ greater than 2^63-1 in magnitude,\ + \ nor may it have more than\ + \ 3 decimal places. Numbers\ + \ larger or more precise will\ + \ be capped or rounded up. (E.g.:\ + \ 0.1m will rounded up to 1m.)\ + \ This may be extended in the\ + \ future if we require larger\ + \ or smaller quantities.\n\n\ + When a Quantity is parsed from\ + \ a string, it will remember\ + \ the type of suffix it had,\ + \ and will use the same type\ + \ again when it is serialized.\n\ + \nBefore serializing, Quantity\ + \ will be put in \"canonical\ + \ form\". 
This means that Exponent/suffix\ + \ will be adjusted up or down\ + \ (with a corresponding increase\ + \ or decrease in Mantissa) such\ + \ that:\n\n- No precision is\ + \ lost - No fractional digits\ + \ will be emitted - The exponent\ + \ (or suffix) is as large as\ + \ possible.\n\nThe sign will\ + \ be omitted unless the number\ + \ is negative.\n\nExamples:\n\ + \n- 1.5 will be serialized as\ + \ \"1500m\" - 1.5Gi will be\ + \ serialized as \"1536Mi\"\n\ + \nNote that the quantity will\ + \ NEVER be internally represented\ + \ by a floating point number.\ + \ That is the whole point of\ + \ this exercise.\n\nNon-canonical\ + \ values will still parse as\ + \ long as they are well formed,\ + \ but will be re-emitted in\ + \ their canonical form. (So\ + \ always use canonical form,\ + \ or don't diff.)\n\nThis format\ + \ is intended to make it difficult\ + \ to use these numbers without\ + \ writing some sort of special\ + \ handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point\ + \ implementation." + type: string + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: SecretKeySelector selects + a key of a Secret. + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the + Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined + within a source must be a C_IDENTIFIER. All + invalid keys will be reported as an event + when the container is starting. When a key + exists in multiple sources, the value associated + with the last source will take precedence. + Values defined by an Env with a duplicate + key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the + source of a set of ConfigMaps + properties: + configMapRef: + description: 'ConfigMapEnvSource selects + a ConfigMap to populate the environment + variables with. + + + The contents of the target ConfigMap''s + Data field will represent the key-value + pairs as environment variables.' + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + prefix: + description: An optional identifier to + prepend to each key in the ConfigMap. + Must be a C_IDENTIFIER. + type: string + secretRef: + description: 'SecretEnvSource selects + a Secret to populate the environment + variables with. + + + The contents of the target Secret''s + Data field will represent the key-value + pairs as environment variables.' + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. 
More info: + https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level + config management to default or override container + images in workload controllers like Deployments + and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, + Never, IfNotPresent. Defaults to Always if + :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle describes actions that + the management system should take in response + to container lifecycle events. For the PostStart + and PreStop lifecycle handlers, management + of the container blocks until the action is + complete, unless the container process fails, + in which case the handler is aborted. + properties: + postStart: + description: LifecycleHandler defines a + specific action that should be taken in + a lifecycle hook. One and only one of + the fields, except TCPSocket must be specified. + properties: + exec: + description: ExecAction describes a + "run in container" action. + properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the + command is root ('/') in the + container's filesystem. The command + is simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, you + need to explicitly call out to + that shell. Exit status of 0 is + treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes + an action based on HTTP Get requests. + properties: + host: + description: Host name to connect + to, defaults to the pod IP. You + probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood + as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + description: IntOrString is a type + that can hold an int32 or a string. When + used in JSON or YAML marshalling + and unmarshalling, it produces + or consumes the inner type. This + allows you to have, for example, + a JSON field that can accept a + name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a + "sleep" action. + properties: + seconds: + description: Seconds is the number + of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes + an action based on opening a socket + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + description: IntOrString is a type + that can hold an int32 or a string. 
When + used in JSON or YAML marshalling + and unmarshalling, it produces + or consumes the inner type. This + allows you to have, for example, + a JSON field that can accept a + name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: LifecycleHandler defines a + specific action that should be taken in + a lifecycle hook. One and only one of + the fields, except TCPSocket must be specified. + properties: + exec: + description: ExecAction describes a + "run in container" action. + properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the + command is root ('/') in the + container's filesystem. The command + is simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, you + need to explicitly call out to + that shell. Exit status of 0 is + treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGetAction describes + an action based on HTTP Get requests. + properties: + host: + description: Host name to connect + to, defaults to the pod IP. You + probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set + in the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in + HTTP probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood + as the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the + HTTP server. + type: string + port: + description: IntOrString is a type + that can hold an int32 or a string. When + used in JSON or YAML marshalling + and unmarshalling, it produces + or consumes the inner type. This + allows you to have, for example, + a JSON field that can accept a + name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: SleepAction describes a + "sleep" action. + properties: + seconds: + description: Seconds is the number + of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: TCPSocketAction describes + an action based on opening a socket + properties: + host: + description: 'Optional: Host name + to connect to, defaults to the + pod IP.' + type: string + port: + description: IntOrString is a type + that can hold an int32 or a string. When + used in JSON or YAML marshalling + and unmarshalling, it produces + or consumes the inner type. This + allows you to have, for example, + a JSON field that can accept a + name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probe describes a health check + to be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run + in container" action. 
+ properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to + explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed + after having succeeded. Defaults to 3. + Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of + the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default + behavior is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an + action based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood as + the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness + probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum + value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. 
This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully + upon probe failure. The grace period is + the duration in seconds after the processes + running in the pod are sent a termination + signal and the time when the processes + are forcibly halted with a kill signal. + Set this value longer than the expected + cleanup time for your process. If this + value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified + as a DNS_LABEL. Each container in a pod must + have a unique name (DNS_LABEL). Cannot be + updated. + type: string + ports: + description: List of ports to expose from the + container. Not specifying a port here DOES + NOT prevent that port from being exposed. + Any port which is listening on the default + "0.0.0.0" address inside a container will + be accessible from the network. Modifying + this array with strategic merge patch may + corrupt the data. For more information See + https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network + port in a single container. + properties: + containerPort: + description: Number of port to expose + on the pod's IP address. This must be + a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the + external port to. + type: string + hostPort: + description: Number of port to expose + on the host. If specified, this must + be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must + match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be + an IANA_SVC_NAME and unique within the + pod. Each named port in a pod must have + a unique name. Name for the port that + can be referred to by services. + type: string + protocol: + description: Protocol for port. Must be + UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + description: Probe describes a health check + to be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run + in container" action. + properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. 
To use a shell, you need to + explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed + after having succeeded. Defaults to 3. + Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of + the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default + behavior is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an + action based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood as + the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness + probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum + value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully + upon probe failure. 
The grace period is + the duration in seconds after the processes + running in the pod are sent a termination + signal and the time when the processes + are forcibly halted with a kill signal. + Set this value longer than the expected + cleanup time for your process. If this + value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the + container. + items: + description: ContainerResizePolicy represents + resource resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to + which this resource resize policy applies. + Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when + specified resource is resized. If not + specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + resources: + description: ResourceRequirements describes + the compute resource requirements. + properties: + claims: + description: 'Claims lists the names of + resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling + the DynamicResourceAllocation feature + gate. + + + This field is immutable. It can only be + set for containers.' + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\n\t\ + (Note that may be empty, from\ + \ the \"\" case in .)\n\n\ + ::= 0 | 1 | ... |\ + \ 9 ::= |\ + \ \ + \ ::= | . |\ + \ . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= | \ + \ | ::=\ + \ Ki | Mi | Gi | Ti | Pi | Ei\n\n\t\ + (International System of units; See:\ + \ http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k\ + \ | M | G | T | P | E\n\n\t(Note that\ + \ 1024 = 1Ki but 1000 = 1k; I didn't\ + \ choose the capitalization.)\n\n\ + \ ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three\ + \ exponent forms is used, no quantity\ + \ may represent a number greater than\ + \ 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers\ + \ larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) 
This may be extended in\ + \ the future if we require larger or\ + \ smaller quantities.\n\nWhen a Quantity\ + \ is parsed from a string, it will remember\ + \ the type of suffix it had, and will\ + \ use the same type again when it is\ + \ serialized.\n\nBefore serializing,\ + \ Quantity will be put in \"canonical\ + \ form\". This means that Exponent/suffix\ + \ will be adjusted up or down (with\ + \ a corresponding increase or decrease\ + \ in Mantissa) such that:\n\n- No precision\ + \ is lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign\ + \ will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5\ + \ will be serialized as \"1500m\" -\ + \ 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER\ + \ be internally represented by a floating\ + \ point number. That is the whole point\ + \ of this exercise.\n\nNon-canonical\ + \ values will still parse as long as\ + \ they are well formed, but will be\ + \ re-emitted in their canonical form.\ + \ (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these\ + \ numbers without writing some sort\ + \ of special handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum + amount of compute resources allowed. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point\ + \ representation of a number. It provides\ + \ convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String()\ + \ and AsInt64() accessors.\n\nThe serialization\ + \ format is:\n\n``` \ + \ ::= \n\n\t\ + (Note that may be empty, from\ + \ the \"\" case in .)\n\n\ + ::= 0 | 1 | ... |\ + \ 9 ::= |\ + \ \ + \ ::= | . |\ + \ . | . \ + \ ::= \"+\" | \"-\" \ + \ ::= | \ + \ ::= | \ + \ | ::=\ + \ Ki | Mi | Gi | Ti | Pi | Ei\n\n\t\ + (International System of units; See:\ + \ http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k\ + \ | M | G | T | P | E\n\n\t(Note that\ + \ 1024 = 1Ki but 1000 = 1k; I didn't\ + \ choose the capitalization.)\n\n\ + \ ::= \"e\" | \"E\" \ + \ ```\n\nNo matter which of the three\ + \ exponent forms is used, no quantity\ + \ may represent a number greater than\ + \ 2^63-1 in magnitude, nor may it have\ + \ more than 3 decimal places. Numbers\ + \ larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded\ + \ up to 1m.) This may be extended in\ + \ the future if we require larger or\ + \ smaller quantities.\n\nWhen a Quantity\ + \ is parsed from a string, it will remember\ + \ the type of suffix it had, and will\ + \ use the same type again when it is\ + \ serialized.\n\nBefore serializing,\ + \ Quantity will be put in \"canonical\ + \ form\". This means that Exponent/suffix\ + \ will be adjusted up or down (with\ + \ a corresponding increase or decrease\ + \ in Mantissa) such that:\n\n- No precision\ + \ is lost - No fractional digits will\ + \ be emitted - The exponent (or suffix)\ + \ is as large as possible.\n\nThe sign\ + \ will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5\ + \ will be serialized as \"1500m\" -\ + \ 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER\ + \ be internally represented by a floating\ + \ point number. 
That is the whole point\ + \ of this exercise.\n\nNon-canonical\ + \ values will still parse as long as\ + \ they are well formed, but will be\ + \ re-emitted in their canonical form.\ + \ (So always use canonical form, or\ + \ don't diff.)\n\nThis format is intended\ + \ to make it difficult to use these\ + \ numbers without writing some sort\ + \ of special handling code in the hopes\ + \ that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum + amount of compute resources required. + If Requests is omitted for a container, + it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart + behavior of individual containers in a pod. + This field may only be set for init containers, + and the only allowed value is "Always". For + non-init containers or when this field is + not specified, the restart behavior is defined + by the Pod''s restart policy and the container + type. Setting the RestartPolicy as "Always" + for the init container will have the following + effect: this init container will be continually + restarted on exit until all regular containers + have terminated. Once all regular containers + have completed, all init containers with restartPolicy + "Always" will be shut down. This lifecycle + differs from normal init containers and is + often referred to as a "sidecar" container. + Although this init container still starts + in the init container sequence, it does not + wait for the container to complete before + proceeding to the next init container. Instead, + the next init container starts immediately + after this init container is started, or after + any startupProbe has successfully completed.' + type: string + securityContext: + description: SecurityContext holds security + configuration that will be applied to a container. + Some fields are present in both SecurityContext + and PodSecurityContext. When both are set, + the values in SecurityContext take precedence. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges + than its parent process. This bool directly + controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: + 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when + spec.os.name is windows.' + type: boolean + capabilities: + description: Adds and removes POSIX capabilities + from running containers. + properties: + add: + description: Added capabilities + items: + type: string + type: array + drop: + description: Removed capabilities + items: + type: string + type: array + type: object + privileged: + description: Run container in privileged + mode. Processes in privileged containers + are essentially equivalent to root on + the host. Defaults to false. Note that + this field cannot be set when spec.os.name + is windows. + type: boolean + procMount: + description: procMount denotes the type + of proc mount to use for the containers. + The default is DefaultProcMount which + uses the container runtime defaults for + readonly paths and masked paths. This + requires the ProcMountType feature flag + to be enabled. 
Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has + a read-only root filesystem. Default is + false. Note that this field cannot be + set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint + of the container process. Uses runtime + default if unset. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. Note that this field + cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container + must run as a non-root user. If true, + the Kubelet will validate the image at + runtime to ensure that it does not run + as UID 0 (root) and fail to start the + container if it does. If unset or false, + no such validation will be performed. + May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint + of the container process. Defaults to + user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. Note that this field + cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: SELinuxOptions are the labels + to be applied to the container + properties: + level: + description: Level is SELinux level + label that applies to the container. + type: string + role: + description: Role is a SELinux role + label that applies to the container. + type: string + type: + description: Type is a SELinux type + label that applies to the container. + type: string + user: + description: User is a SELinux user + label that applies to the container. + type: string + type: object + seccompProfile: + description: SeccompProfile defines a pod/container's + seccomp profile settings. Only one profile + source may be set. + properties: + localhostProfile: + description: localhostProfile indicates + a profile defined in a file on the + node should be used. The profile must + be preconfigured on the node to work. + Must be a descending path, relative + to the kubelet's configured seccomp + profile location. Must be set if type + is "Localhost". Must NOT be set for + any other type. + type: string + type: + description: 'type indicates which kind + of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a + file on the node should be used. RuntimeDefault + - the container runtime default profile + should be used. Unconfined - no profile + should be applied.' + type: string + required: + - type + type: object + windowsOptions: + description: WindowsSecurityContextOptions + contain Windows-specific options and credentials. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where + the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName + field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. 
+ type: string + hostProcess: + description: HostProcess determines + if a container should be run as a + 'Host Process' container. All of a + Pod's containers must have the same + effective HostProcess value (it is + not allowed to have a mix of HostProcess + containers and non-HostProcess containers). + In addition, if HostProcess is true + then HostNetwork must also be set + to true. + type: boolean + runAsUserName: + description: The UserName in Windows + to run the entrypoint of the container + process. Defaults to the user specified + in image metadata if unspecified. + May also be set in PodSecurityContext. + If set in both SecurityContext and + PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probe describes a health check + to be performed against a container to determine + whether it is alive or ready to receive traffic. + properties: + exec: + description: ExecAction describes a "run + in container" action. + properties: + command: + description: Command is the command + line to execute inside the container, + the working directory for the command is + root ('/') in the container's filesystem. + The command is simply exec'd, it is + not run inside a shell, so traditional + shell instructions ('|', etc) won't + work. To use a shell, you need to + explicitly call out to that shell. + Exit status of 0 is treated as live/healthy + and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures + for the probe to be considered failed + after having succeeded. Defaults to 3. + Minimum value is 1. + format: int32 + type: integer + grpc: + properties: + port: + description: Port number of the gRPC + service. Number must be in the range + 1 to 65535. + format: int32 + type: integer + service: + description: 'Service is the name of + the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default + behavior is defined by gRPC.' + type: string + required: + - port + type: object + httpGet: + description: HTTPGetAction describes an + action based on HTTP Get requests. + properties: + host: + description: Host name to connect to, + defaults to the pod IP. You probably + want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in + the request. HTTP allows repeated + headers. + items: + description: HTTPHeader describes + a custom header to be used in HTTP + probes + properties: + name: + description: The header field + name. This will be canonicalized + upon output, so case-variant + names will be understood as + the same header. + type: string + value: + description: The header field + value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the + container has started before liveness + probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform + the probe. Default to 10 seconds. Minimum + value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes + for the probe to be considered successful + after having failed. Defaults to 1. Must + be 1 for liveness and startup. Minimum + value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocketAction describes an + action based on opening a socket + properties: + host: + description: 'Optional: Host name to + connect to, defaults to the pod IP.' + type: string + port: + description: IntOrString is a type that + can hold an int32 or a string. When + used in JSON or YAML marshalling and + unmarshalling, it produces or consumes + the inner type. This allows you to + have, for example, a JSON field that + can accept a name or number. + format: int-or-string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds + the pod needs to terminate gracefully + upon probe failure. The grace period is + the duration in seconds after the processes + running in the pod are sent a termination + signal and the time when the processes + are forcibly halted with a kill signal. + Set this value longer than the expected + cleanup time for your process. If this + value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value + must be non-negative integer. The value + zero indicates stop immediately via the + kill signal (no opportunity to shut down). + This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which + the probe times out. Defaults to 1 second. + Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. + If this is not set, reads from stdin in the + container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should + close the stdin channel after it has been + opened by a single attach. When stdin is true + the stdin stream will remain open across multiple + attach sessions. If stdinOnce is set to true, + stdin is opened on container start, is empty + until the first client attaches to stdin, + and then remains open and accepts data until + the client disconnects, at which time stdin + is closed and remains closed until the container + is restarted. If this flag is false, a container + processes that reads from stdin will never + receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file + to which the container''s termination message + will be written is mounted into the container''s + filesystem. 
Message written is intended to + be brief final status, such as an assertion + failure message. Will be truncated by the + node if greater than 4096 bytes. The total + message length across all containers will + be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message + should be populated. File will use the contents + of terminationMessagePath to populate the + container status message on both success and + failure. FallbackToLogsOnError will use the + last chunk of container log output if the + termination message file is empty and the + container exited with an error. The log output + is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to + be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block + devices to be used by the container. + items: + description: volumeDevice describes a mapping + of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside + of the container that the device will + be mapped to. + type: string + name: + description: name must match the name + of a persistentVolumeClaim in the pod + type: string + required: + - name + - devicePath + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: Path within the container + at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines + how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is + used. This field is beta in 1.10. + type: string + name: + description: This must match the Name + of a Volume. + type: string + readOnly: + description: Mounted read-only if true, + read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: Path within the volume from + which the container's volume should + be mounted. Defaults to "" (volume's + root). + type: string + subPathExpr: + description: Expanded path within the + volume from which the container's volume + should be mounted. Behaves similarly + to SubPath but environment variable + references $(VAR_NAME) are expanded + using the container's environment. Defaults + to "" (volume's root). SubPathExpr and + SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + type: array + workingDir: + description: Container's working directory. + If not specified, the container runtime's + default will be used, which might be configured + in the container image. Cannot be updated. + type: string + customVolumeMounts: + type: object + description: Custom Pod volumes to mount into the + specified container's filesystem. + additionalProperties: + type: array + description: Custom Pod volumes to mount into the + specified container's filesystem. + items: + description: 'VolumeMount describes a mounting + of a Volume within a container. 
+ + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core' + properties: + mountPath: + description: Path within the container at + which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how + mounts are propagated from the host to container + and the other way around. When not set, + MountPropagationNone is used. This field + is beta in 1.10. + type: string + name: + description: This must match the Name of a + Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should + be mounted. Behaves similarly to SubPath + but environment variable references $(VAR_NAME) + are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + customInitVolumeMounts: + type: object + description: Custom Pod volumes to mount into the + specified init container's filesystem. + additionalProperties: + type: array + description: Custom Pod volumes to mount into the + specified init container's filesystem. + items: + description: 'VolumeMount describes a mounting + of a Volume within a container. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#volumemount-v1-core' + properties: + mountPath: + description: Path within the container at + which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how + mounts are propagated from the host to container + and the other way around. When not set, + MountPropagationNone is used. This field + is beta in 1.10. + type: string + name: + description: This must match the Name of a + Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should + be mounted. Behaves similarly to SubPath + but environment variable references $(VAR_NAME) + are expanded using the container's environment. + Defaults to "" (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + required: + - name + - mountPath + type: object + configurations: + type: object + description: 'Shards custom configurations. + + ' + properties: + sgPostgresConfig: + type: string + description: 'Name of the [SGPostgresConfig](https://stackgres.io/doc/latest/reference/crd/sgpgconfig) + used for the cluster. It must exist. When not set, + a default Postgres config, for the major version + selected, is used. + + ' + sgPoolingConfig: + type: string + description: 'Name of the [SGPoolingConfig](https://stackgres.io/doc/latest/reference/crd/sgpoolconfig) + used for this cluster. Each pod contains a sidecar + with a connection pooler (currently: [PgBouncer](https://www.pgbouncer.org/)). 
+ The connection pooler is implemented as a sidecar. + + + If not set, a default configuration will be used. + Disabling connection pooling altogether is possible + if the disableConnectionPooling property of the + pods object is set to true. + + ' + replication: + type: object + description: "This section allows to configure the global\ + \ Postgres replication mode.\n\nThe main replication\ + \ group is implicit and contains the total number of\ + \ instances less the sum of all\n instances in other\ + \ replication groups.\n\nThe total number of instances\ + \ is always specified by `.spec.instances`.\n" + properties: + mode: + type: string + description: "The replication mode applied to the\ + \ whole cluster.\nPossible values are:\n* `async`\ + \ (default)\n* `sync`\n* `strict-sync`\n* `sync-all`\n\ + * `strict-sync-all`\n\n**async**\n\nWhen in asynchronous\ + \ mode the cluster is allowed to lose some committed\ + \ transactions.\n When the primary server fails\ + \ or becomes unavailable for any other reason a\ + \ sufficiently healthy standby\n will automatically\ + \ be promoted to primary. Any transactions that\ + \ have not been replicated to that standby\n remain\ + \ in a \"forked timeline\" on the primary, and are\ + \ effectively unrecoverable (the data is still there,\n\ + \ but recovering it requires a manual recovery\ + \ effort by data recovery specialists).\n\n**sync**\n\ + \nWhen in synchronous mode a standby will not be\ + \ promoted unless it is certain that the standby\ + \ contains all\n transactions that may have returned\ + \ a successful commit status to client (clients\ + \ can change the behavior\n per transaction using\ + \ PostgreSQL’s `synchronous_commit` setting. Transactions\ + \ with `synchronous_commit`\n values of `off` and\ + \ `local` may be lost on fail over, but will not\ + \ be blocked by replication delays). This\n means\ + \ that the system may be unavailable for writes\ + \ even though some servers are available. System\n\ + \ administrators can still use manual failover\ + \ commands to promote a standby even if it results\ + \ in transaction\n loss.\n\nSynchronous mode does\ + \ not guarantee multi node durability of commits\ + \ under all circumstances. When no suitable\n standby\ + \ is available, primary server will still accept\ + \ writes, but does not guarantee their replication.\ + \ When\n the primary fails in this mode no standby\ + \ will be promoted. When the host that used to be\ + \ the primary comes\n back it will get promoted\ + \ automatically, unless system administrator performed\ + \ a manual failover. This behavior\n makes synchronous\ + \ mode usable with 2 node clusters.\n\nWhen synchronous\ + \ mode is used and a standby crashes, commits will\ + \ block until the primary is switched to standalone\n\ + \ mode. Manually shutting down or restarting a\ + \ standby will not cause a commit service interruption.\ + \ Standby will\n signal the primary to release\ + \ itself from synchronous standby duties before\ + \ PostgreSQL shutdown is initiated.\n\n**strict-sync**\n\ + \nWhen it is absolutely necessary to guarantee that\ + \ each write is stored durably on at least two nodes,\ + \ use the strict\n synchronous mode. This mode\ + \ prevents synchronous replication to be switched\ + \ off on the primary when no synchronous\n standby\ + \ candidates are available. 
As a downside, the primary\ + \ will not be available for writes (unless the Postgres\n\ + \ transaction explicitly turns off `synchronous_mode`\ + \ parameter), blocking all client write requests\ + \ until at least one\n synchronous replica comes\ + \ up.\n\n**Note**: Because of the way synchronous\ + \ replication is implemented in PostgreSQL it is\ + \ still possible to lose\n transactions even when\ + \ using strict synchronous mode. If the PostgreSQL\ + \ backend is cancelled while waiting to acknowledge\n\ + \ replication (as a result of packet cancellation\ + \ due to client timeout or backend failure) transaction\ + \ changes become\n visible for other backends.\ + \ Such changes are not yet replicated and may be\ + \ lost in case of standby promotion.\n\n**sync-all**\n\ + \nThe same as `sync` but `syncInstances` is ignored\ + \ and the number of synchronous instances is equal\ + \ to the total number\n of instances less one.\n\ + \n**strict-sync-all**\n\nThe same as `strict-sync`\ + \ but `syncInstances` is ignored and the number\ + \ of synchronous instances is equal to the total\ + \ number\n of instances less one.\n" + default: async + syncInstances: + type: integer + minimum: 1 + description: "Number of synchronous standby instances.\ + \ Must be less than the total number of instances.\ + \ It is set to 1 by default.\n Only settable if\ + \ mode is `sync` or `strict-sync`.\n" + initialization: + type: object + description: 'Allow to specify how the replicas are + initialized. + + ' + properties: + mode: + type: string + description: "Allow to specify how the replicas\ + \ are initialized.\n\nPossible values are:\n\ + \n* `FromPrimary`: When this mode is used replicas\ + \ will always be created from the primary using\ + \ `pg_basebackup`.\n* `FromReplica`: When this\ + \ mode is used replicas will be created from\ + \ another existing replica using\n `pg_basebackup`.\ + \ Falls back to `FromPrimary` if there's no replica\ + \ or it fails.\n* `FromExistingBackup`: When\ + \ this mode is used replicas will be created\ + \ from an existing SGBackup. If `backupNewerThan`\ + \ is set\n the SGBackup must be newer than its\ + \ value. When this mode fails to restore an\ + \ SGBackup it will try with a previous one (if\ + \ exists).\n Falls back to `FromReplica` if there's\ + \ no backup left or it fails.\n* `FromNewlyCreatedBackup`:\ + \ When this mode is used replicas will be created\ + \ from a newly created SGBackup.\n Falls back\ + \ to `FromExistingBackup` if `backupNewerThan`\ + \ is set and a recent backup newer than\ + \ its value exists or it fails.\n" + default: FromExistingBackup + backupNewerThan: + type: string + description: "An ISO 8601 duration in the format\ + \ `PnDTnHnMn.nS`, that specifies how old an\ + \ SGBackup has to be in order to be selected\n\ + \ to initialize a replica.\n\nWhen `FromExistingBackup`\ + \ mode is set this field restricts the selection\ + \ of the SGBackup to be used for recovery to those newer\ + \ than the\n specified value.\n\nWhen `FromNewlyCreatedBackup`\ + \ mode is set this field skips the creation of an SGBackup\ + \ to be used for recovery if one newer than\n\ + \ the specified value exists.\n" + backupRestorePerformance: + type: object + description: 'Configuration that affects the backup + network and disk usage performance during recovery. + + ' + properties: + maxNetworkBandwidth: + type: integer + description: 'Maximum storage upload bandwidth + used when storing a backup. In bytes (per + second).
+ + ' + maxDiskBandwidth: + type: integer + description: 'Maximum disk read I/O when performing + a backup. In bytes (per second). + + ' + downloadConcurrency: + type: integer + minimum: 1 + description: 'Backup storage may use several + concurrent streams to read the data. This + parameter configures the number of parallel + streams to use. By default, it''s set to + the minimum between the number of files to + read and 10. + + ' + metadata: + type: object + description: Metadata information from shards cluster + created resources. + properties: + annotations: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + to be passed to resources created and managed by + StackGres. + properties: + allResources: + type: object + description: Annotations to attach to any resource + created or managed by StackGres. + additionalProperties: + type: string + clusterPods: + type: object + description: Annotations to attach to pods created + or managed by StackGres. + additionalProperties: + type: string + services: + type: object + description: Annotations to attach to all services + created or managed by StackGres. + additionalProperties: + type: string + primaryService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-primary` service. + additionalProperties: + type: string + replicasService: + type: object + description: Custom Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) + passed to the `-replicas` service. + additionalProperties: + type: string + labels: + type: object + description: Custom Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) + to be passed to resources created and managed by + StackGres. + properties: + clusterPods: + type: object + description: Labels to attach to Pods created + or managed by StackGres. + additionalProperties: + type: string + services: + type: object + description: Labels to attach to Services and + Endpoints created or managed by StackGres. + additionalProperties: + type: string + prometheusAutobind: + type: boolean + description: '**Deprecated** use instead .spec.configurations.observability.prometheusAutobind. + + ' + distributedLogs: + type: object + description: "StackGres features a functionality for all pods to\ + \ send Postgres, Patroni and PgBouncer logs to a central (distributed)\ + \ location, which is in turn another Postgres database. Logs can\ + \ then be accessed via SQL interface or from the web UI. This\ + \ section controls whether to enable this feature or not. If not\ + \ enabled, logs are sent to the pod's standard output.\n\n**Example:**\n\ + \n```yaml\napiVersion: stackgres.io/v1alpha1\nkind: SGShardedCluster\n\ + metadata:\n name: stackgres\nspec:\n distributedLogs:\n sgDistributedLogs:\ + \ distributedlogs\n```\n" + properties: + sgDistributedLogs: + type: string + description: 'Name of the [SGDistributedLogs](https://stackgres.io/doc/latest/reference/crd/sgdistributedlogs/) + to use for this cluster. It must exist. + + ' + retention: + type: string + pattern: ^[0-9]+ (minutes?|hours?|days?|months?)
+ description: "Define a retention window with the syntax `\ + \ (minutes|hours|days|months)` in which log entries are kept.\n\ + \ Log entries will be removed when they get older more than\ + \ the double of the specified retention window.\n\nWhen this\ + \ field is changed the retention will be applied only to log\ + \ entries that are newer than the end of\n the retention\ + \ window previously specified. If no retention window was\ + \ previously specified it is considered\n to be of 7 days.\ + \ This means that if previous retention window is of `7 days`\ + \ new retention configuration will\n apply after UTC timestamp\ + \ calculated with: `SELECT date_trunc('days', now() at time\ + \ zone 'UTC') - INTERVAL '7 days'`.\n" + nonProductionOptions: + type: object + properties: + disableClusterPodAntiAffinity: + type: boolean + description: 'It is a best practice, on non-containerized environments, + when running production workloads, to run each database server + on a different server (virtual or physical), i.e., not to + co-locate more than one database server per host. + + + The same best practice applies to databases on containers. + By default, StackGres will not allow to run more than one + StackGres pod on a given Kubernetes node. Set this property + to true to allow more than one StackGres pod per node. + + + This property default value may be changed depending on the + value of field `.spec.profile`. + + + This property default value may be changed depending on the + value of field `.spec.profile`. + + ' + disablePatroniResourceRequirements: + type: boolean + description: 'It is a best practice, on containerized environments, + when running production workloads, to enforce container''s + resources requirements. + + + The same best practice applies to databases on containers. + By default, StackGres will configure resource requirements + for patroni container. Set this property to true to prevent + StackGres from setting patroni container''s resources requirement. + + + This property default value may be changed depending on the + value of field `.spec.profile`. + + ' + disableClusterResourceRequirements: + type: boolean + description: 'It is a best practice, on containerized environments, + when running production workloads, to enforce container''s + resources requirements. + + + By default, StackGres will configure resource requirements + for all the containers. Set this property to true to prevent + StackGres from setting container''s resources requirements + (except for patroni container, see `disablePatroniResourceRequirements`). + + + This property default value may be changed depending on the + value of field `.spec.profile`. + + ' + enableSetPatroniCpuRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ cpu requirements request to be equals to the limit allow\ + \ to achieve the highest level of performance. Doing so, reduces\ + \ the chances of leaving\n the workload with less cpu than\ + \ it requires. It also allow to set [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage exclusive CPUs\ + \ on the node.\n\nBy default, StackGres will configure cpu\ + \ requirements to have the same limit and request for the\ + \ patroni container. 
Set this property to true to prevent\ + \ StackGres from setting the patroni container's cpu requirements\ + \ request equal to the limit\n when `.spec.requests.cpu`\ + \ is configured in the referenced `SGInstanceProfile`.\n" + default: false + enableSetClusterCpuRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ cpu requirements request to be equal to the limit allows\ + \ to achieve the highest level of performance. Doing so reduces\ + \ the chances of leaving\n the workload with less cpu than\ + \ it requires. It also allows to set a [static CPU management\ + \ policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy)\ + \ that allows to guarantee a pod the usage of exclusive CPUs\ + \ on the node.\n\nBy default, StackGres will configure cpu\ + \ requirements to have the same limit and request for all\ + \ the containers. Set this property to true to prevent StackGres\ + \ from setting container's cpu requirements request equal\ + \ to the limit (except for the patroni container, see `enableSetPatroniCpuRequests`)\n\ + \ when `.spec.requests.containers..cpu` `.spec.requests.initContainers..cpu` is configured in the referenced `SGInstanceProfile`.\n" + default: false + enableSetPatroniMemoryRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ memory requirements request to be equal to the limit allows\ + \ to achieve the highest level of performance. Doing so reduces\ + \ the chances of leaving\n the workload with less memory\ + \ than it requires.\n\nBy default, StackGres will configure\ + \ memory requirements to have the same limit and request for\ + \ the patroni container. Set this property to true to prevent\ + \ StackGres from setting the patroni container's memory requirements\ + \ request equal to the limit\n when `.spec.requests.memory`\ + \ is configured in the referenced `SGInstanceProfile`.\n" + default: false + enableSetClusterMemoryRequests: + type: boolean + description: "**Deprecated** this value is ignored and you can\ + \ consider it as always `true`.\n\nOn containerized environments,\ + \ when running production workloads, enforcing container's\ + \ memory requirements request to be equal to the limit allows\ + \ to achieve the highest level of performance. Doing so reduces\ + \ the chances of leaving\n the workload with less memory\ + \ than it requires.\n\nBy default, StackGres will configure\ + \ memory requirements to have the same limit and request for\ + \ all the containers. Set this property to true to prevent\ + \ StackGres from setting container's memory requirements request\ + \ equal to the limit (except for the patroni container, see `enableSetPatroniMemoryRequests`)\n\ + \ when `.spec.requests.containers..memory`\ + \ `.spec.requests.initContainers..memory`\ + \ is configured in the referenced `SGInstanceProfile`.\n" + default: false + enabledFeatureGates: + type: array + description: 'A list of StackGres feature gates to enable (not + suitable for a production environment). + + + Available feature gates are: + + * `babelfish-flavor`: Allow to use `babelfish` flavor. + + ' + items: + type: string + description: The name of the feature gate to enable.
+ initialData: + type: object + description: 'Sharded cluster initialization data options. Sharded + cluster may be initialized empty, or from a sharded backup restoration. + + + **This field can only be set on creation.** + + ' + properties: + restore: + type: object + description: 'This section allows to restore a sharded cluster + from an existing copy of the metadata and data. + + ' + properties: + fromBackup: + type: object + description: "From which sharded backup to restore and how\ + \ the process is configured.\n\n**Example:**\n\n```yaml\n\ + apiVersion: stackgres.io/v1\nkind: SGShardedCluster\n\ + metadata:\n name: stackgres\nspec:\n initialData:\n\ + \ restore:\n fromBackup:\n name: stackgres-backup\n\ + \ downloadDiskConcurrency: 1\n```\n" + properties: + name: + type: string + description: "When set to the name of an existing [SGShardedBackup](https://stackgres.io/doc/latest/reference/crd/sgshardedbackup),\ + \ the sharded cluster is initialized by restoring\ + \ the\n backup data to it. If not set, the sharded\ + \ cluster is initialized empty. The selected sharded\ + \ backup must be in the same namespace.\n" + targetInclusive: + type: boolean + description: "Specify the [recovery_target_inclusive](https://postgresqlco.nf/doc/en/param/recovery_target_timeline/)\ + \ to stop recovery just after the specified\n recovery\ + \ target (true), or just before the recovery target\ + \ (false). Applies when targetLsn, pointInTimeRecovery,\ + \ or targetXid is specified. This\n setting controls\ + \ whether transactions having exactly the target WAL\ + \ location (LSN), commit time, or transaction ID,\ + \ respectively, will be included\n in the recovery.\ + \ Default is true.\n" + pointInTimeRecovery: + type: object + description: "It is possible to restore the database\ + \ to its state at any time since your backup was taken\ + \ using Point-in-Time Recovery (PITR) as long as another\n\ + \ backup newer than the PITR requested restoration\ + \ date does not exist.\n\nPoint In Time Recovery\ + \ (PITR). PITR allows to restore the database state\ + \ to an arbitrary point of time in the past, as long\ + \ as you specify a backup\n older than the PITR requested\ + \ restoration date and no backup newer\ + \ than the same restoration date exists.\n\nSee also: https://www.postgresql.org/docs/current/continuous-archiving.html\n" + properties: + restoreToTimestamp: + type: string + description: 'An ISO 8601 date that holds the UTC date + indicating at which point-in-time the database + has to be restored. + + ' + downloadDiskConcurrency: + type: integer + minimum: 1 + description: 'The backup fetch process may fetch several + streams in parallel. Parallel fetching is enabled when + set to a value larger than one. + + + If not specified it will be interpreted as latest. + + ' + status: + type: object + description: Current status of a StackGres sharded cluster. + properties: + conditions: + type: array + items: + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one + status to another. + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, + Unknown. + type: string + type: + description: Type of deployment condition.
+ type: string + clusterStatuses: + type: array + description: The list of cluster statuses. + items: + type: object + required: + - name + properties: + name: + type: string + description: The name of the cluster. + pendingRestart: + type: boolean + description: Indicates if the cluster requires restart + toInstallPostgresExtensions: + type: array + description: The list of Postgres extensions to install + items: + type: object + required: + - name + - publisher + - version + - repository + - postgresVersion + properties: + name: + type: string + description: The name of the extension to install. + publisher: + type: string + description: The id of the publisher of the extension to install. + version: + type: string + description: The version of the extension to install. + repository: + type: string + description: The repository base URL from where the extension + will be installed from. + postgresVersion: + type: string + description: The postgres major version of the extension to + install. + build: + type: string + description: The build version of the extension to install. + extraMounts: + type: array + description: The extra mounts of the extension to install. + items: + type: string + description: The extra mount of the installed extension. + binding: + type: object + description: 'This section follow the schema specified in [Service + Binding spec for provisioned service](https://servicebinding.io/spec/core/1.0.0/#provisioned-service). + + + For more information see https://servicebinding.io/spec/core/1.0.0/ + + ' + properties: + name: + type: string + description: The name of the Secret as specified in [Service + Binding spec for provisioned service](https://servicebinding.io/spec/core/1.0.0/#provisioned-service). + sgBackups: + type: array + description: 'The list of SGBackups that compose the SGShardedBackup + used to restore the sharded cluster. + + ' + items: + type: string + description: 'One of the SGBackups that compose the SGShardedBackup + used to restore the sharded cluster. + + ' diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgshardeddbops.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgshardeddbops.yaml new file mode 100644 index 00000000000..d53e7837806 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgshardeddbops.yaml @@ -0,0 +1,1365 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgshardeddbops.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGShardedDbOps + listKind: SGShardedDbOpsList + plural: sgshardeddbops + singular: sgshardeddbops + shortNames: + - sgsdo + versions: + - name: v1 + served: true + storage: true + additionalPrinterColumns: + - name: cluster + type: string + jsonPath: .spec.sgShardedCluster + - name: operation + type: string + jsonPath: .spec.op + - name: status + type: string + jsonPath: .status.conditions[?(@.status=="True")].reason + - name: started-at + type: string + jsonPath: .status.opStarted + priority: 1 + - name: retries + type: string + jsonPath: .status.opRetries + priority: 1 + schema: + openAPIV3Schema: + required: + - metadata + - spec + type: object + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 57 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + description: 'Name of the Sharded Database Operation. A database + operation represents a ""kind"" of operation on a StackGres cluster, + classified by a given name. 
The operation reference one SGCluster + by its name. Following [Kubernetes naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 `label`, an alphanumeric (a-z, and + 0-9) string, with the ''-'' character allowed anywhere except + the first or last character. + + + The name must be unique across all database operations in the + same namespace." + + ' + spec: + type: object + properties: + sgShardedCluster: + type: string + description: 'The name of SGShardedCluster on which the operation + will be performed. + + ' + scheduling: + type: object + description: Pod custom node scheduling and affinity configuration + properties: + nodeSelector: + type: object + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the pod to fit on a node. Selector which must match a + node''s labels for the pod to be scheduled on that node. More + info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + ' + tolerations: + description: 'If specified, the pod''s tolerations. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core' + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + nodeAffinity: + description: 'Node affinity is a group of node affinity scheduling + rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. 
for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the node + matches the corresponding matchExpressions; the node(s) + with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects + (i.e. is also a no-op). + properties: + preference: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. + The TopologySelectorTerm type implements a subset + of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the union of the + results of one or more label queries over a set of nodes; + that is, it represents the OR of the selectors represented + by the node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. + The TopologySelectorTerm type implements a subset + of the NodeSelectorTerm. 
+ properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values + array must be empty. If the operator is + Gt or Lt, the values array must have a + single element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which + indicate the highest priorities with the former being the + highest priority. Any other name must be defined by creating + a PriorityClass object with that name. If not specified, the + pod priority will be default or zero if there is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the node + has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. 
This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not + be scheduled onto the node. If the affinity requirements + specified by this field cease to be met at some point + during pod execution (e.g. due to a pod label update), + the system may or may not try to eventually evict the + pod from its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm are + intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key in (value)` to select the + group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MatchLabelKeys and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key notin (value)` to select + the group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. 
+ items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of + namespace names that the term applies to. The term + is applied to the union of the namespaces listed + in this field and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter pod anti + affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling requirements + (resource request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to the + sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum are + the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. 
This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the pod + will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met at + some point during pod execution (e.g. due to a pod label + update), the system may or may not try to eventually evict + the pod from its node. When there are multiple elements, + the lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not + co-located (anti-affinity) with, where co-located is + defined as running on a node whose value of the label + with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key in (value)` to select the + group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MatchLabelKeys and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod label + keys to select which pods will be taken into consideration. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are merged with + `LabelSelector` as `key notin (value)` to select + the group of existing pods which pods will be taken + into consideration for the incoming pod's pod (anti) + affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is + empty. The same key is forbidden to exist in both + MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys + cannot be set when LabelSelector isn't set. This + is an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. 
+ items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of + namespace names that the term applies to. The term + is applied to the union of the namespaces listed + in this field and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey matches + that of any node on which any of the selected pods + is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + op: + type: string + description: 'The kind of operation that will be performed on the + SGCluster. Available operations are: + + + * `resharding`: perform a resharding of the cluster. + + * `restart`: perform a restart of the cluster. + + * `securityUpgrade`: perform a security upgrade of the cluster. + + ' + runAt: + type: string + description: 'An ISO 8601 date, that holds UTC scheduled date of + the operation execution. + + + If not specified or if the date it''s in the past, it will be + interpreted ASAP. + + ' + timeout: + type: string + description: 'An ISO 8601 duration in the format `PnDTnHnMn.nS`, + that specifies a timeout after which the operation execution will + be canceled. + + + If the operation can not be performed due to timeout expiration, + the condition `Failed` will have a status of `True` and the reason + will be `OperationTimedOut`. + + + If not specified the operation will never fail for timeout expiration. + + ' + maxRetries: + type: integer + description: 'The maximum number of retries the operation is allowed + to do after a failure. + + + A value of `0` (zero) means no retries are made. 
Defaults to: + `0`. + + ' + resharding: + type: object + description: "Configuration for resharding. Resharding a sharded\ + \ cluster is the operation that moves the data among shards in\ + \ order to try to\n balance the disk space used in each shard.\ + \ See also https://docs.citusdata.com/en/stable/develop/api_udf.html#citus-rebalance-start\n" + properties: + citus: + type: object + description: 'Citus-specific resharding parameters + + ' + properties: + threshold: + type: number + description: 'A float number between 0.0 and 1.0 which indicates + the maximum difference ratio of node utilization from + average utilization. + + See also https://docs.citusdata.com/en/stable/develop/api_udf.html#citus-rebalance-start + + ' + drainOnly: + type: boolean + description: 'A boolean which, when `true`, makes the rebalance + only move shards away from nodes that are being drained, + without moving any other shards. + + See also https://docs.citusdata.com/en/stable/develop/api_udf.html#citus-rebalance-start + + ' + rebalanceStrategy: + type: string + description: 'The name of a strategy in the rebalancer strategy + table. A default one will be picked if not specified. + + See also https://docs.citusdata.com/en/stable/develop/api_udf.html#citus-rebalance-start + + ' + restart: + type: object + description: 'Configuration of restart + + ' + properties: + method: + type: string + description: "The method used to perform the restart operation.\ + \ Available methods are:\n\n* `InPlace`: the in-place method\ + \ does not require more resources than those that are available.\n\ + \ In case only one instance of the StackGres cluster for the\ + \ coordinator or any shard is present\n this means the service\ + \ disruption will last longer, so we encourage using the reduced-impact\ + \ restart, especially for a production environment.\n\ + * `ReducedImpact`: this procedure is the same as the in-place\ + \ method but requires additional\n resources in order to spawn\ + \ a new updated replica that will be removed when the procedure\ + \ completes.\n" + onlyPendingRestart: + type: boolean + description: "By default all Pods are restarted. Setting this\ + \ option to `true` allows restarting only those Pods which\n\ + \ are in a pending-restart state as detected by the operation.\ + \ Defaults to: `false`.\n" + securityUpgrade: + type: object + description: 'Configuration of security upgrade + + ' + properties: + method: + type: string + description: "The method used to perform the security upgrade\ + \ operation.
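For reference, a minimal sketch of a sharded dbops resource exercising the fields described above (op, runAt, timeout, maxRetries and resharding.citus). The apiVersion, the kind spelling, the metadata name and the referenced sharded cluster are illustrative assumptions, not values taken from these manifests:

apiVersion: stackgres.io/v1              # assumed group/version for this CRD
kind: SGShardedDbOps                     # assumed kind defined earlier in this file
metadata:
  name: reshard-example                  # hypothetical name
spec:
  sgShardedCluster: my-sharded-cluster   # hypothetical target sharded cluster
  op: resharding
  runAt: "2026-01-01T05:00:00Z"          # ISO 8601 UTC instant; omit to run ASAP
  timeout: PT1H                          # ISO 8601 duration; omit to never time out
  maxRetries: 1
  resharding:
    citus:
      threshold: 0.1
      drainOnly: false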
Available methods are:\n\n* `InPlace`: the in-place\ + \ method does not require more resources than those that are\ + \ available.\n In case only an instance of the StackGres\ + \ cluster is present this mean the service disruption will\n\ + \ last longer so we encourage use the reduced impact restart\ + \ and especially for a production environment.\n* `ReducedImpact`:\ + \ this procedure is the same as the in-place method but require\ + \ additional\n resources in order to spawn a new updated\ + \ replica that will be removed when the procedure completes.\n" + required: + - sgShardedCluster + - op + status: + type: object + properties: + conditions: + type: array + description: 'Possible conditions are: + + + * Running: to indicate when the operation is actually running + + * Completed: to indicate when the operation has completed successfully + + * Failed: to indicate when the operation has failed + + ' + items: + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one + status to another. + type: string + message: + description: A human-readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition last transition. + type: string + status: + description: Status of the condition, one of `True`, `False` + or `Unknown`. + type: string + type: + description: Type of deployment condition. + type: string + opRetries: + type: integer + description: 'The number of retries performed by the operation + + ' + opStarted: + type: string + description: 'The ISO 8601 timestamp of when the operation started + running + + ' + restart: + type: object + description: 'The results of a restart + + ' + properties: + pendingToRestartSgClusters: + type: array + description: 'The SGClusters that are pending to be restarted + + ' + items: + type: string + restartedSgClusters: + type: array + description: 'The SGClusters that have been restarted + + ' + items: + type: string + failure: + type: string + description: 'A failure message (when available) + + ' + securityUpgrade: + type: object + description: 'The results of a security upgrade + + ' + properties: + pendingToRestartSgClusters: + type: array + description: 'The SGClusters that are pending to be restarted + + ' + items: + type: string + restartedSgClusters: + type: array + description: 'The SGClusters that have been restarted + + ' + items: + type: string + failure: + type: string + description: 'A failure message (when available) + + ' diff --git a/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgstreams.yaml b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgstreams.yaml new file mode 100644 index 00000000000..3c7233828b6 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/manifests/stackgres.io_sgstreams.yaml @@ -0,0 +1,5193 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sgstreams.stackgres.io +spec: + group: stackgres.io + scope: Namespaced + names: + kind: SGStream + listKind: SGStreamList + plural: sgstreams + singular: sgstream + shortNames: + - sgstr + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + metadata: + type: object + properties: + name: + type: string + maxLength: 56 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + description: 'Name of the StackGres stream. 
Following [Kubernetes + naming conventions](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/identifiers.md), + it must be an rfc1035/rfc1123 subdomain, that is, up to 253 characters + consisting of one or more lowercase labels separated by `.`, where + each label is an alphanumeric (a-z, and 0-9) string, with the + `-` character allowed anywhere except the first or last character. + + ' + spec: + type: object + description: "Specification of the desired behavior of a StackGres stream.\n\ + \nA stream represents the process of performing a change data capture\ + \ (CDC) operation on a data source that generates a stream of events\ + \ containing information about the changes happening (or that have happened)\ + \ to the database in real time (or from the beginning).\n\nThe stream\ + \ allows specifying different types for the target of the CDC operation.\ + \ See `SGStream.spec.target.type`.\n\nThe stream performs two distinct\ + \ operations to generate data source changes for the target:\n\n* Snapshotting:\ + \ allows capturing the content of the data source at a specific point\ + \ in time and streaming it as if it were changes, thus providing a\ + \ stream of events as if they were an aggregate from the beginning of\ + \ the existence of the data source.\n* Streaming: allows capturing\ + \ the changes that are happening in real time in the data source and\ + \ streaming them as changes continuously.\n\nThe CDC is performed using\ + \ [Debezium Engine](https://debezium.io/documentation/reference/stable/development/engine.html).\ + \ SGStream extends the functionality of Debezium by providing a [custom\ + \ signaling channel](https://debezium.io/documentation/reference/stable/configuration/signalling.html#debezium-custom-signaling-channel)\ + \ that allows sending signals by simply adding an annotation to the SGStream\ + \ resource.\nTo send a signal simply create an annotation with the\ + \ following format:\n\n```\nmetadata:\n annotations:\n debezium-signal.stackgres.io/: \n```\n\nAlso, SGStream provides the following\ + \ custom signal implementations:\n \n * `tombstone`: allows stopping\ + \ Debezium streaming and the SGStream completely. This signal is useful\ + \ to end the streaming in a graceful way, allowing for the\ + \ removal of the logical slot created by Debezium.\n * `command`:\ + \ allows executing any SQL command on the target database. Only available\ + \ when the target type is `SGCluster`.\n" + required: + - source + - target + properties: + source: + type: object + description: "The data source of the stream to which change data\ + \ capture will be applied. \n" + required: + - type + properties: + type: + type: string + description: 'The type of data source. Available data source + types are: + + + * `SGCluster`: an SGCluster in the same namespace + + * `Postgres`: any Postgres instance + + ' + sgCluster: + type: object + description: 'The configuration of the data source required + when type is `SGCluster`. + + ' + required: + - name + properties: + name: + type: string + description: 'The target SGCluster name. + + ' + database: + type: string + description: 'The target database name to which the CDC + process will connect. + + + If not specified the default postgres database will be + targeted. + + ' + username: + type: object + description: 'The username used by the CDC process to connect + to the database. + + + If not specified the default superuser username (by default + postgres) will be used.
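The signaling-annotation snippet quoted in the spec description above lost its placeholders during extraction; as a sketch, assuming the annotation key suffix names the signal and the value carries the signal data (an assumption, since the placeholders are not recoverable here), such an annotation might look roughly like:

metadata:
  annotations:
    # assumed form: debezium-signal.stackgres.io/<signal>: <data>
    debezium-signal.stackgres.io/tombstone: ""

The tombstone signal name is one of the custom signals listed in the description; the empty value is only a placeholder.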
+ + ' + required: + - name + - key + properties: + name: + type: string + description: 'The Secret name where the username is + stored. + + ' + key: + type: string + description: 'The Secret key where the username is stored. + + ' + password: + type: object + description: 'The password used by the CDC process to connect + to the database. + + + If not specified the default superuser password will be + used. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The Secret name where the password is + stored. + + ' + key: + type: string + description: 'The Secret key where the password is stored. + + ' + includes: + type: array + description: 'A list of regular expressions that allow to + match one or more `.
.` entries + to be filtered before sending to the target. + + + This property is mutually exclusive with `excludes`. + + ' + items: + type: string + description: 'A regular expression that allows matching + one or more `.
.` entries to be + filtered before sending to the target. + + ' + excludes: + type: array + description: 'A list of regular expressions that allow to + match one or more `.
.` entries + to be filtered out before sending to the target. + + + This property is mutually exclusive with `includes`. + + ' + items: + type: string + description: 'A regular expression that allows matching + one or more `.
.` entries to be + filtered out before sending to the target. + + ' + debeziumProperties: + type: object + description: 'Specific property of the debezium Postgres + connector. + + + See https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-connector-properties + + + Each property is converted from myPropertyName to my.property.name + + ' + properties: + pluginName: + type: string + description: 'Default `pgoutput`. The name of the [PostgreSQL + logical decoding plug-in](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-output-plugin) + installed on the PostgreSQL server. Supported values + are decoderbufs, and pgoutput. + + ' + slotName: + type: string + description: 'Default . (with all characters that are not `[a-zA-Z0-9]` + changed to `_` character). The name of the PostgreSQL + logical decoding slot that was created for streaming + changes from a particular plug-in for a particular + database/schema. The server uses this slot to stream + events to the Debezium connector that you are configuring. + + + Slot names must conform to [PostgreSQL replication + slot naming rules](https://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION-SLOTS-MANIPULATION), + which state: "Each replication slot has a name, which + can contain lower-case letters, numbers, and the underscore + character." + + ' + slotDropOnStop: + type: boolean + description: 'Default `true`. Whether or not to delete + the logical replication slot when the connector stops + in a graceful, expected way. The default behavior + is that the replication slot remains configured for + the connector when the connector stops. When the connector + restarts, having the same replication slot enables + the connector to start processing where it left off. + Set to true in only testing or development environments. + Dropping the slot allows the database to discard WAL + segments. When the connector restarts it performs + a new snapshot or it can continue from a persistent + offset in the Kafka Connect offsets topic. + + ' + publicationName: + type: string + description: 'Default . + (with all characters that are not `[a-zA-Z0-9]` changed + to `_` character). The name of the PostgreSQL publication + created for streaming changes when using pgoutput. + This publication is created at start-up if it does + not already exist and it includes all tables. Debezium + then applies its own include/exclude list filtering, + if configured, to limit the publication to change + events for the specific tables of interest. The connector + user must have superuser permissions to create this + publication, so it is usually preferable to create + the publication before starting the connector for + the first time. If the publication already exists, + either for all tables or configured with a subset + of tables, Debezium uses the publication as it is + defined. + + ' + skipMessagesWithoutChange: + type: boolean + description: 'Default `false`. Specifies whether to + skip publishing messages when there is no change in + included columns. This would essentially filter messages + if there is no change in columns included as per includes + or excludes fields. Note: Only works when REPLICA + IDENTITY of the table is set to FULL + + ' + timePrecisionMode: + type: string + description: 'Default `adaptive`. 
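Pulling the source fields above together, a hedged SGStream sketch (the cluster, database, Secret and table names are invented for illustration, the includes entry assumes the usual schema.table form since the placeholders were stripped above, and the required target section is omitted because its schema is not shown here):

apiVersion: stackgres.io/v1alpha1
kind: SGStream
metadata:
  name: stream-example               # hypothetical name
spec:
  source:
    type: SGCluster
    sgCluster:
      name: my-cluster               # hypothetical SGCluster
      database: app                  # hypothetical database
      username:
        name: cdc-credentials        # hypothetical Secret
        key: username
      password:
        name: cdc-credentials
        key: password
      includes:
        - public\.orders             # regular expression; assumed schema.table form
      debeziumProperties:
        pluginName: pgoutput         # camelCase here maps to plugin.name
        slotDropOnStop: false
  # target: ...                      # required; its schema appears elsewhere in this CRD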
Time, date, and timestamps + can be represented with different kinds of precision: + + + * `adaptive`: captures the time and timestamp values + exactly as in the database using either millisecond, + microsecond, or nanosecond precision values based + on the database column’s type. + + * `adaptive_time_microseconds`: captures the date, + datetime and timestamp values exactly as in the database + using either millisecond, microsecond, or nanosecond + precision values based on the database column’s type. + An exception is TIME type fields, which are always + captured as microseconds. + + * `connect`: always represents time and timestamp + values by using Kafka Connect’s built-in representations + for Time, Date, and Timestamp, which use millisecond + precision regardless of the database columns'' precision. + For more information, see [temporal values](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-temporal-types). + + ' + decimalHandlingMode: + type: string + description: 'Default `precise`. Specifies how the connector + should handle values for DECIMAL and NUMERIC columns: + + + * `precise`: represents values by using java.math.BigDecimal + to represent values in binary form in change events. + + * `double`: represents values by using double values, + which might result in a loss of precision but which + is easier to use. + + * `string`: encodes values as formatted strings, which + are easy to consume but semantic information about + the real type is lost. For more information, see [Decimal + types](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-decimal-types). + + ' + hstoreHandlingMode: + type: string + description: 'Default `json`. Specifies how the connector + should handle values for hstore columns: + + + * `map`: represents values by using MAP. + + * `json`: represents values by using json string. + This setting encodes values as formatted strings such + as {"key" : "val"}. For more information, see [PostgreSQL + HSTORE type](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-hstore-type). + + ' + intervalHandlingMode: + type: string + description: "Default `numeric`. Specifies how the connector\ + \ should handle values for interval columns:\n\n *\ + \ `numeric`: represents intervals using approximate\ + \ number of microseconds.\n * `string`: represents\ + \ intervals exactly by using the string pattern representation\ + \ PYMDTHMS.\ + \ For example: P1Y2M3DT4H5M6.78S. For more information,\ + \ see [PostgreSQL basic types](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-basic-types).\n" + tombstonesOnDelete: + type: boolean + description: 'Default `true`. Controls whether a delete + event is followed by a tombstone event. + + + * `true` - a delete operation is represented by a + delete event and a subsequent tombstone event. + + * `false` - only a delete event is emitted. + + + After a source record is deleted, emitting a tombstone + event (the default behavior) allows Kafka to completely + delete all events that pertain to the key of the deleted + row in case [log compaction](https://kafka.apache.org/documentation/#compaction) + is enabled for the topic. + + ' + columnTruncateToLengthChars: + type: array + items: + type: string + description: "An optional, list of regular expressions\ + \ that match the fully-qualified names of character-based\ + \ columns. 
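As a sketch of the type-handling options just described, with values picked only for illustration from the documented alternatives:

debeziumProperties:
  timePrecisionMode: adaptive
  decimalHandlingMode: string
  hstoreHandlingMode: map
  intervalHandlingMode: string
  tombstonesOnDelete: false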
Set this property if you want to truncate\ + \ the data in a set of columns when it exceeds the\ + \ number of characters specified by the length in\ + \ the property name. Set length to a positive integer\ + \ value, for example, column.truncate.to.20.chars.\n\ + \ The fully-qualified name of a column observes the\ + \ following format: ...\n\ + \ To match the name of a column, Debezium applies\ + \ the regular expression that you specify as an anchored\ + \ regular expression. That is, the specified expression\ + \ is matched against the entire name string of the\ + \ column; the expression does not match substrings\ + \ that might be present in a column name.\n You can\ + \ specify multiple properties with different lengths\ + \ in a single configuration.\n" + columnMaskWithLengthChars: + type: array + items: + type: string + description: "An optional, list of regular expressions\ + \ that match the fully-qualified names of character-based\ + \ columns. Set this property if you want the connector\ + \ to mask the values for a set of columns, for example,\ + \ if they contain sensitive data. Set length to a\ + \ positive integer to replace data in the specified\ + \ columns with the number of asterisk (*) characters\ + \ specified by the length in the property name. Set\ + \ length to 0 (zero) to replace data in the specified\ + \ columns with an empty string.\n The fully-qualified\ + \ name of a column observes the following format:\ + \ schemaName.tableName.columnName.\n To match the\ + \ name of a column, Debezium applies the regular expression\ + \ that you specify as an anchored regular expression.\ + \ That is, the specified expression is matched against\ + \ the entire name string of the column; the expression\ + \ does not match substrings that might be present\ + \ in a column name.\n You can specify multiple properties\ + \ with different lengths in a single configuration.\n" + columnMaskHash: + type: object + description: "An optional section, that allow to specify,\ + \ for an hash algorithm and a salt, a list of regular\ + \ expressions that match the fully-qualified names\ + \ of character-based columns. Fully-qualified names\ + \ for columns are of the form ...\n\ + \ To match the name of a column Debezium applies the\ + \ regular expression that you specify as an anchored\ + \ regular expression. That is, the specified expression\ + \ is matched against the entire name string of the\ + \ column; the expression does not match substrings\ + \ that might be present in a column name. In the resulting\ + \ change event record, the values for the specified\ + \ columns are replaced with pseudonyms.\n A pseudonym\ + \ consists of the hashed value that results from applying\ + \ the specified hashAlgorithm and salt. Based on the\ + \ hash function that is used, referential integrity\ + \ is maintained, while column values are replaced\ + \ with pseudonyms. Supported hash functions are described\ + \ in the [MessageDigest section](https://docs.oracle.com/javase/7/docs/technotes/guides/security/StandardNames.html#MessageDigest)\ + \ of the Java Cryptography Architecture Standard Algorithm\ + \ Name Documentation.\n In the following example,\ + \ CzQMA0cB5K is a randomly selected salt.\n columnMaskHash.SHA-256.CzQMA0cB5K=[inventory.orders.customerName,inventory.shipment.customerName]\n\ + \ If necessary, the pseudonym is automatically shortened\ + \ to the length of the column. 
The connector configuration\ + \ can include multiple properties that specify different\ + \ hash algorithms and salts.\n Depending on the hash\ + \ algorithm used, the salt selected, and the actual\ + \ data set, the resulting data set might not be completely\ + \ masked.\n" + additionalProperties: + type: object + description: The hash algorithm (e.g. SHA-256) type + and configuration. + additionalProperties: + type: object + description: The salt (e.g. CzQMA0cB5K) value and + configuration. + additionalProperties: + type: array + items: + type: string + description: The list of regular expressions that + match the fully-qualified names of character-based + columns (e.g. inventory.orders.customerName) + columnMaskHashV2: + type: object + description: "Similar to also columnMaskHash but using\ + \ hashing strategy version 2.\n Hashing strategy version\ + \ 2 should be used to ensure fidelity if the value\ + \ is being hashed in different places or systems.\n" + additionalProperties: + type: object + description: The hash algorithm (e.g. SHA-256) type + and configuration. + additionalProperties: + type: object + description: The salt (e.g. CzQMA0cB5K) value and + configuration. + additionalProperties: + type: array + items: + type: string + description: The list of regular expressions that + match the fully-qualified names of character-based + columns (e.g. inventory.orders.customerName) + columnPropagateSourceType: + type: array + items: + type: string + description: "Default `[.*]`. An optional, list of regular\ + \ expressions that match the fully-qualified names\ + \ of columns for which you want the connector to emit\ + \ extra parameters that represent column metadata.\ + \ When this property is set, the connector adds the\ + \ following fields to the schema of event records:\n\ + \n* `__debezium.source.column.type`\n* `__debezium.source.column.length`\n\ + * `__debezium.source.column.scale`\n\nThese parameters\ + \ propagate a column’s original type name and length\ + \ (for variable-width types), respectively.\n Enabling\ + \ the connector to emit this extra data can assist\ + \ in properly sizing specific numeric or character-based\ + \ columns in sink databases.\n The fully-qualified\ + \ name of a column observes one of the following formats:\ + \ databaseName.tableName.columnName, or databaseName.schemaName.tableName.columnName.\n\ + \ To match the name of a column, Debezium applies\ + \ the regular expression that you specify as an anchored\ + \ regular expression. That is, the specified expression\ + \ is matched against the entire name string of the\ + \ column; the expression does not match substrings\ + \ that might be present in a column name.\n" + datatypePropagateSourceType: + type: array + items: + type: string + description: "Default `[.*]`. An optional, list of regular\ + \ expressions that specify the fully-qualified names\ + \ of data types that are defined for columns in a\ + \ database. 
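The nested shape that the columnMaskHash / columnMaskHashV2 schema above implies (hash algorithm, then salt, then the list of column regular expressions), reusing the example values from the description:

columnMaskHash:
  SHA-256:                           # hash algorithm
    CzQMA0cB5K:                      # salt
      - inventory.orders.customerName
      - inventory.shipment.customerName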
When this property is set, for columns\ + \ with matching data types, the connector emits event\ + \ records that include the following extra fields\ + \ in their schema:\n\n* `__debezium.source.column.type`\n\ + * `__debezium.source.column.length`\n* `__debezium.source.column.scale`\n\ + \nThese parameters propagate a column’s original type\ + \ name and length (for variable-width types), respectively.\n\ + \ Enabling the connector to emit this extra data can\ + \ assist in properly sizing specific numeric or character-based\ + \ columns in sink databases.\n The fully-qualified\ + \ name of a column observes one of the following formats:\ + \ databaseName.tableName.typeName, or databaseName.schemaName.tableName.typeName.\n\ + \ To match the name of a data type, Debezium applies\ + \ the regular expression that you specify as an anchored\ + \ regular expression. That is, the specified expression\ + \ is matched against the entire name string of the\ + \ data type; the expression does not match substrings\ + \ that might be present in a type name.\n For the\ + \ list of PostgreSQL-specific data type names, see\ + \ the [PostgreSQL data type mappings](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-data-types).\n" + messageKeyColumns: + type: array + items: + type: string + description: "A list of expressions that specify the\ + \ columns that the connector uses to form custom message\ + \ keys for change event records that it publishes\ + \ to the Kafka topics for specified tables.\n By default,\ + \ Debezium uses the primary key column of a table\ + \ as the message key for records that it emits. In\ + \ place of the default, or to specify a key for tables\ + \ that lack a primary key, you can configure custom\ + \ message keys based on one or more columns.\n To\ + \ establish a custom message key for a table, list\ + \ the table, followed by the columns to use as the\ + \ message key. Each list entry takes the following\ + \ format:\n :,\n\ + \ To base a table key on multiple column names, insert\ + \ commas between the column names.\n Each fully-qualified\ + \ table name is a regular expression in the following\ + \ format:\n .\n The property\ + \ can include entries for multiple tables. Use a semicolon\ + \ to separate table entries in the list.\n The following\ + \ example sets the message key for the tables inventory.customers\ + \ and purchase.orders:\n inventory.customers:pk1,pk2;(.*).purchaseorders:pk3,pk4\n\ + \ For the table inventory.customer, the columns pk1\ + \ and pk2 are specified as the message key. For the\ + \ purchaseorders tables in any schema, the columns\ + \ pk3 and pk4 server as the message key.\n There is\ + \ no limit to the number of columns that you use to\ + \ create custom message keys. However, it’s best to\ + \ use the minimum number that are required to specify\ + \ a unique key.\n Note that having this property set\ + \ and REPLICA IDENTITY set to DEFAULT on the tables,\ + \ will cause the tombstone events to not be created\ + \ properly if the key columns are not part of the\ + \ primary key of the table. Setting REPLICA IDENTITY\ + \ to FULL is the only solution.\n" + publicationAutocreateMode: + type: string + description: 'Default `all_tables`. Applies only when + streaming changes by using [the pgoutput plug-in](https://www.postgresql.org/docs/current/sql-createpublication.html). 
+ The setting determines how creation of a [publication](https://www.postgresql.org/docs/current/logical-replication-publication.html) + should work. Specify one of the following values: + + + * `all_tables` - If a publication exists, the connector + uses it. If a publication does not exist, the connector + creates a publication for all tables in the database + for which the connector is capturing changes. For + the connector to create a publication it must access + the database through a database user account that + has permission to create publications and perform + replications. You grant the required permission by + using the following SQL command CREATE PUBLICATION + FOR ALL TABLES;. + + * `disabled` - The connector does not attempt to create + a publication. A database administrator or the user + configured to perform replications must have created + the publication before running the connector. If the + connector cannot find the publication, the connector + throws an exception and stops. + + * `filtered` - If a publication exists, the connector + uses it. If no publication exists, the connector creates + a new publication for tables that match the current + filter configuration as specified by the schema.include.list, + schema.exclude.list, and table.include.list, and table.exclude.list + connector configuration properties. For example: CREATE + PUBLICATION FOR TABLE . If the publication exists, the connector updates + the publication for tables that match the current + filter configuration. For example: ALTER PUBLICATION + SET TABLE . + + ' + replicaIdentityAutosetValues: + type: array + items: + type: string + description: "The setting determines the value for [replica\ + \ identity](https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-REPLICA-IDENTITY)\ + \ at table level.\n This option will overwrite the\ + \ existing value in database. A comma-separated list\ + \ of regular expressions that match fully-qualified\ + \ tables and replica identity value to be used in\ + \ the table.\n Each expression must match the pattern\ + \ ':',\ + \ where the table name could be defined as (SCHEMA_NAME.TABLE_NAME),\ + \ and the replica identity values are:\n DEFAULT\ + \ - Records the old values of the columns of the primary\ + \ key, if any. This is the default for non-system\ + \ tables.\n INDEX index_name - Records the old values\ + \ of the columns covered by the named index, that\ + \ must be unique, not partial, not deferrable, and\ + \ include only columns marked NOT NULL. If this index\ + \ is dropped, the behavior is the same as NOTHING.\n\ + \ FULL - Records the old values of all columns in\ + \ the row.\n NOTHING - Records no information about\ + \ the old row. This is the default for system tables.\n\ + \ For example,\n schema1.*:FULL,schema2.table2:NOTHING,schema2.table3:INDEX\ + \ idx_name\n" + binaryHandlingMode: + type: string + description: 'Default `bytes`. Specifies how binary + (bytea) columns should be represented in change events: + + + * `bytes` represents binary data as byte array. + + * `base64` represents binary data as base64-encoded + strings. + + * `base64-url-safe` represents binary data as base64-url-safe-encoded + strings. + + * `hex` represents binary data as hex-encoded (base16) + strings. + + ' + schemaNameAdjustmentMode: + type: string + description: 'Default `none`. Specifies how schema names + should be adjusted for compatibility with the message + converter used by the connector. 
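A sketch of replicaIdentityAutosetValues in the array form this schema declares, reusing the entries from the description and assuming one table:value pair per array element (the description itself still speaks of a single comma-separated string, so this mapping is an assumption):

debeziumProperties:
  replicaIdentityAutosetValues:
    - "schema1.*:FULL"
    - "schema2.table2:NOTHING"
    - "schema2.table3:INDEX idx_name"
  binaryHandlingMode: base64         # illustrative choice among the documented values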
Possible settings: + + + * `none` does not apply any adjustment. + + * `avro` replaces the characters that cannot be used + in the Avro type name with underscore. + + * `avro_unicode` replaces the underscore or characters + that cannot be used in the Avro type name with corresponding + unicode like _uxxxx. Note: _ is an escape sequence + like backslash in Java + + ' + fieldNameAdjustmentMode: + type: string + description: 'Default `none`. Specifies how field names + should be adjusted for compatibility with the message + converter used by the connector. Possible settings: + + + * `none` does not apply any adjustment. + + * `avro` replaces the characters that cannot be used + in the Avro type name with underscore. + + * `avro_unicode` replaces the underscore or characters + that cannot be used in the Avro type name with corresponding + unicode like _uxxxx. Note: _ is an escape sequence + like backslash in Java + + + For more information, see [Avro naming](https://debezium.io/documentation/reference/stable/configuration/avro.html#avro-naming). + + ' + moneyFractionDigits: + type: integer + description: 'Default `2`. Specifies how many decimal + digits should be used when converting Postgres money + type to java.math.BigDecimal, which represents the + values in change events. Applicable only when decimalHandlingMode + is set to precise. + + ' + converters: + type: object + additionalProperties: + type: object + additionalProperties: + type: string + description: "Enumerates a comma-separated list of the\ + \ symbolic names of the [custom converter](https://debezium.io/documentation/reference/stable/development/converters.html#custom-converters)\ + \ instances that the connector can use. For example,\n\ + \n```\nisbn:\n type: io.debezium.test.IsbnConverter\n\ + \ schemaName: io.debezium.postgresql.type.Isbn\n\ + ```\n\nYou must set the converters property to enable\ + \ the connector to use a custom converter.\n For each\ + \ converter that you configure for a connector, you\ + \ must also add a .type property, which specifies\ + \ the fully-qualified name of the class that implements\ + \ the converter interface.\nIf you want to further\ + \ control the behavior of a configured converter,\ + \ you can add one or more configuration parameters\ + \ to pass values to the converter. To associate any\ + \ additional configuration parameter with a converter,\ + \ prefix the parameter names with the symbolic name\ + \ of the converter.\n Each property is converted from\ + \ myPropertyName to my.property.name\n" + snapshotMode: + type: string + description: "Default `initial`. Specifies the criteria\ + \ for performing a snapshot when the connector starts:\n\ + \n* `always` - The connector performs a snapshot every\ + \ time that it starts. The snapshot includes the structure\ + \ and data of the captured tables. Specify this value\ + \ to populate topics with a complete representation\ + \ of the data from the captured tables every time\ + \ that the connector starts. After the snapshot completes,\ + \ the connector begins to stream event records for\ + \ subsequent database changes.\n* `initial` - The\ + \ connector performs a snapshot only when no offsets\ + \ have been recorded for the logical server name.\n\ + * `initial_only` - The connector performs an initial\ + \ snapshot and then stops, without processing any\ + \ subsequent changes.\n* `no_data` - The connector\ + \ never performs snapshots. 
When a connector is configured\ + \ this way, after it starts, it behaves as follows:\ + \ If there is a previously stored LSN in the Kafka\ + \ offsets topic, the connector continues streaming\ + \ changes from that position. If no LSN is stored,\ + \ the connector starts streaming changes from the\ + \ point in time when the PostgreSQL logical replication\ + \ slot was created on the server. Use this snapshot\ + \ mode only when you know all data of interest is\ + \ still reflected in the WAL.\n* `never` - Deprecated\ + \ see no_data.\n* `when_needed` - After the connector\ + \ starts, it performs a snapshot only if it detects\ + \ one of the following circumstances: \n It cannot\ + \ detect any topic offsets.\n A previously recorded\ + \ offset specifies a log position that is not available\ + \ on the server.\n* `configuration_based` - With this\ + \ option, you control snapshot behavior through a\ + \ set of connector properties that have the prefix\ + \ 'snapshotModeConfigurationBased'.\n* `custom` -\ + \ The connector performs a snapshot according to the\ + \ implementation specified by the snapshotModeCustomName\ + \ property, which defines a custom implementation\ + \ of the io.debezium.spi.snapshot.Snapshotter interface.\n\ + \nFor more information, see the [table of snapshot.mode\ + \ options](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-connector-snapshot-mode-options).\n" + snapshotModeConfigurationBasedSnapshotData: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, set this property to specify + whether the connector includes table data when it + performs a snapshot. + + ' + snapshotModeConfigurationBasedSnapshotSchema: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, set this property to specify + whether the connector includes the table schema when + it performs a snapshot. + + ' + snapshotModeConfigurationBasedStartStream: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, set this property to specify + whether the connector begins to stream change events + after a snapshot completes. + + ' + snapshotModeConfigurationBasedSnapshotOnSchemaError: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, set this property to specify + whether the connector includes table schema in a snapshot + if the schema history topic is not available. + + ' + snapshotModeConfigurationBasedSnapshotOnDataError: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, this property specifies + whether the connector attempts to snapshot table data + if it does not find the last committed offset in the + transaction log. Set the value to true to instruct + the connector to perform a new snapshot. + + ' + snapshotModeCustomName: + type: string + description: 'When snapshotMode is set as custom, use + this setting to specify the name of the custom implementation + provided in the name() method that is defined by the + ''io.debezium.spi.snapshot.Snapshotter'' interface. + The provided implementation is called after a connector + restart to determine whether to perform a snapshot. + For more information, see [custom snapshotter SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + snapshotLockingMode: + type: string + description: 'Default `none`. 
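A sketch of the configuration-based snapshot mode described above; the flag values are illustrative and simply spell out the snapshotModeConfigurationBased* prefix convention:

debeziumProperties:
  snapshotMode: configuration_based
  snapshotModeConfigurationBasedSnapshotData: true
  snapshotModeConfigurationBasedSnapshotSchema: true
  snapshotModeConfigurationBasedStartStream: true
  snapshotModeConfigurationBasedSnapshotOnSchemaError: false
  snapshotModeConfigurationBasedSnapshotOnDataError: false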
Specifies how the connector + holds locks on tables while performing a schema snapshot. + Set one of the following options: + + + * `shared`: The connector holds a table lock that + prevents exclusive table access during the initial + portion phase of the snapshot in which database schemas + and other metadata are read. After the initial phase, + the snapshot no longer requires table locks. + + * `none`: The connector avoids locks entirely. Do + not use this mode if schema changes might occur during + the snapshot. + + + > *WARNING*: Do not use this mode if schema changes + might occur during the snapshot. + + + * `custom`: The connector performs a snapshot according + to the implementation specified by the snapshotLockingModeCustomName + property, which is a custom implementation of the + io.debezium.spi.snapshot.SnapshotLock interface. + + ' + snapshotLockingModeCustomName: + type: string + description: 'When snapshotLockingMode is set to custom, + use this setting to specify the name of the custom + implementation provided in the name() method that + is defined by the ''io.debezium.spi.snapshot.SnapshotLock'' + interface. For more information, see [custom snapshotter + SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + snapshotQueryMode: + type: string + description: 'Default `select_all`. Specifies how the + connector queries data while performing a snapshot. + Set one of the following options: + + + * `select_all`: The connector performs a select all + query by default, optionally adjusting the columns + selected based on the column include and exclude list + configurations. + + * `custom`: The connector performs a snapshot query + according to the implementation specified by the snapshotQueryModeCustomName + property, which defines a custom implementation of + the io.debezium.spi.snapshot.SnapshotQuery interface. + This setting enables you to manage snapshot content + in a more flexible manner compared to using the snapshotSelectStatementOverrides + property. + + ' + snapshotQueryModeCustomName: + type: string + description: 'When snapshotQueryMode is set as custom, + use this setting to specify the name of the custom + implementation provided in the name() method that + is defined by the ''io.debezium.spi.snapshot.SnapshotQuery'' + interface. For more information, see [custom snapshotter + SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + snapshotIncludeCollectionList: + type: array + items: + type: string + description: "Default . An optional, list of regular\ + \ expressions that match the fully-qualified names\ + \ (.) of the tables to include\ + \ in a snapshot. The specified items must be named\ + \ in the connector’s table.include.list property.\ + \ This property takes effect only if the connector’s\ + \ snapshotMode property is set to a value other than\ + \ `never`. This property does not affect the behavior\ + \ of incremental snapshots.\n To match the name\ + \ of a table, Debezium applies the regular expression\ + \ that you specify as an anchored regular expression.\ + \ That is, the specified expression is matched against\ + \ the entire name string of the table; it does not\ + \ match substrings that might be present in a table\ + \ name.\n" + snapshotLockTimeoutMs: + type: integer + description: 'Default `10000`. 
Positive integer value + that specifies the maximum amount of time (in milliseconds) + to wait to obtain table locks when performing a snapshot. + If the connector cannot acquire table locks in this + time interval, the snapshot fails. [How the connector + performs snapshots](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-snapshots) + provides details. + + ' + snapshotSelectStatementOverrides: + type: object + additionalProperties: + type: string + description: "Specifies the table rows to include in\ + \ a snapshot. Use the property if you want a snapshot\ + \ to include only a subset of the rows in a table.\ + \ This property affects snapshots only. It does not\ + \ apply to events that the connector reads from the\ + \ log.\n The property contains a hierarchy of fully-qualified\ + \ table names in the form ..\ + \ For example,\n\n```\nsnapshotSelectStatementOverrides:\ + \ \n \"customers.orders\": \"SELECT * FROM [customers].[orders]\ + \ WHERE delete_flag = 0 ORDER BY id DESC\"\n```\n\n\ + In the resulting snapshot, the connector includes\ + \ only the records for which delete_flag = 0.\n" + eventProcessingFailureHandlingMode: + type: string + description: 'Default `fail`. Specifies how the connector + should react to exceptions during processing of events: + + + * `fail`: propagates the exception, indicates the + offset of the problematic event, and causes the connector + to stop. + + * `warn`: logs the offset of the problematic event, + skips that event, and continues processing. + + * `skip`: skips the problematic event and continues + processing. + + ' + maxBatchSize: + type: integer + description: 'Default `2048`. Positive integer value + that specifies the maximum size of each batch of events + that the connector processes. + + ' + maxQueueSize: + type: integer + description: 'Default `8192`. Positive integer value + that specifies the maximum number of records that + the blocking queue can hold. When Debezium reads events + streamed from the database, it places the events in + the blocking queue before it writes them to Kafka. + The blocking queue can provide backpressure for reading + change events from the database in cases where the + connector ingests messages faster than it can write + them to Kafka, or when Kafka becomes unavailable. + Events that are held in the queue are disregarded + when the connector periodically records offsets. Always + set the value of maxQueueSize to be larger than the + value of maxBatchSize. + + ' + maxQueueSizeInBytes: + type: integer + description: "Default `0`. A long integer value that\ + \ specifies the maximum volume of the blocking queue\ + \ in bytes. By default, volume limits are not specified\ + \ for the blocking queue. To specify the number of\ + \ bytes that the queue can consume, set this property\ + \ to a positive long value.\n If maxQueueSize is also\ + \ set, writing to the queue is blocked when the size\ + \ of the queue reaches the limit specified by either\ + \ property. For example, if you set maxQueueSize=1000,\ + \ and maxQueueSizeInBytes=5000, writing to the queue\ + \ is blocked after the queue contains 1000 records,\ + \ or after the volume of the records in the queue\ + \ reaches 5000 bytes.\n" + pollIntervalMs: + type: integer + description: 'Default `500`. Positive integer value + that specifies the number of milliseconds the connector + should wait for new change events to appear before + it starts processing a batch of events. Defaults to + 500 milliseconds. 
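A sketch of the queue and batching knobs just described, restating the documented defaults (maxQueueSize should stay larger than maxBatchSize, as noted above):

debeziumProperties:
  maxBatchSize: 2048
  maxQueueSize: 8192                 # keep larger than maxBatchSize
  maxQueueSizeInBytes: 0             # 0 means no byte limit
  pollIntervalMs: 500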
+ + ' + includeUnknownDatatypes: + type: boolean + description: "Default `true`. Specifies connector behavior\ + \ when the connector encounters a field whose data\ + \ type is unknown. The default behavior is that the\ + \ connector omits the field from the change event\ + \ and logs a warning.\n Set this property to true\ + \ if you want the change event to contain an opaque\ + \ binary representation of the field. This lets consumers\ + \ decode the field. You can control the exact representation\ + \ by setting the binaryHandlingMode property.\n> *NOTE*:\ + \ Consumers risk backward compatibility issues when\ + \ `includeUnknownDatatypes` is set to `true`. Not\ + \ only may the database-specific binary representation\ + \ change between releases, but if the data type is\ + \ eventually supported by Debezium, the data type\ + \ will be sent downstream in a logical type, which\ + \ would require adjustments by consumers. In general,\ + \ when encountering unsupported data types, create\ + \ a feature request so that support can be added.\n" + databaseInitialStatements: + type: array + items: + type: string + description: "A list of SQL statements that the connector\ + \ executes when it establishes a JDBC connection to\ + \ the database.\n The connector may establish JDBC\ + \ connections at its own discretion. Consequently,\ + \ this property is useful for configuration of session\ + \ parameters only, and not for executing DML statements.\n\ + \ The connector does not execute these statements\ + \ when it creates a connection for reading the transaction\ + \ log.\n" + statusUpdateIntervalMs: + type: integer + description: 'Default `10000`. Frequency for sending + replication connection status updates to the server, + given in milliseconds. The property also controls + how frequently the database status is checked to detect + a dead connection in case the database was shut down. + + ' + heartbeatIntervalMs: + type: integer + description: "Default `0`. Controls how frequently the\ + \ connector sends heartbeat messages to a Kafka topic.\ + \ The default behavior is that the connector does\ + \ not send heartbeat messages.\n Heartbeat messages\ + \ are useful for monitoring whether the connector\ + \ is receiving change events from the database. Heartbeat\ + \ messages might help decrease the number of change\ + \ events that need to be re-sent when a connector\ + \ restarts. To send heartbeat messages, set this property\ + \ to a positive integer, which indicates the number\ + \ of milliseconds between heartbeat messages.\n Heartbeat\ + \ messages are needed when there are many updates\ + \ in a database that is being tracked but only a tiny\ + \ number of updates are related to the table(s) and\ + \ schema(s) for which the connector is capturing changes.\ + \ In this situation, the connector reads from the\ + \ database transaction log as usual but rarely emits\ + \ change records to Kafka. This means that no offset\ + \ updates are committed to Kafka and the connector\ + \ does not have an opportunity to send the latest\ + \ retrieved LSN to the database. The database retains\ + \ WAL files that contain events that have already\ + \ been processed by the connector. 
Sending heartbeat\ + \ messages enables the connector to send the latest\ + \ retrieved LSN to the database, which allows the\ + \ database to reclaim disk space being used by no\ + \ longer needed WAL files.\n" + heartbeatActionQuery: + type: string + description: "Specifies a query that the connector executes\ + \ on the source database when the connector sends\ + \ a heartbeat message.\n This is useful for resolving\ + \ the situation described in [WAL disk space consumption](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-wal-disk-space),\ + \ where capturing changes from a low-traffic database\ + \ on the same host as a high-traffic database prevents\ + \ Debezium from processing WAL records and thus acknowledging\ + \ WAL positions with the database. To address this\ + \ situation, create a heartbeat table in the low-traffic\ + \ database, and set this property to a statement that\ + \ inserts records into that table, for example:\n\n\ + \ ```\n INSERT INTO test_heartbeat_table (text) VALUES\ + \ ('test_heartbeat')\n ```\n \n This allows the connector\ + \ to receive changes from the low-traffic database\ + \ and acknowledge their LSNs, which prevents unbounded\ + \ WAL growth on the database host.\n" + schemaRefreshMode: + type: string + description: 'Default `columns_diff`. Specify the conditions + that trigger a refresh of the in-memory schema for + a table. + + + * `columns_diff`: is the safest mode. It ensures that + the in-memory schema stays in sync with the database + table’s schema at all times. + + * `columns_diff_exclude_unchanged_toast`: instructs + the connector to refresh the in-memory schema cache + if there is a discrepancy with the schema derived + from the incoming message, unless unchanged TOASTable + data fully accounts for the discrepancy. + + + This setting can significantly improve connector performance + if there are frequently-updated tables that have TOASTed + data that are rarely part of updates. However, it + is possible for the in-memory schema to become outdated + if TOASTable columns are dropped from the table. + + ' + snapshotDelayMs: + type: integer + description: 'An interval in milliseconds that the connector + should wait before performing a snapshot when the + connector starts. If you are starting multiple connectors + in a cluster, this property is useful for avoiding + snapshot interruptions, which might cause re-balancing + of connectors. + + ' + snapshotFetchSize: + type: integer + description: 'Default `10240`. During a snapshot, the + connector reads table content in batches of rows. + This property specifies the maximum number of rows + in a batch. + + ' + slotStreamParams: + type: object + description: "Parameters to pass to the configured logical\ + \ decoding plug-in. For example:\n\n```\nslotStreamParams:\n\ + \ add-tables: \"public.table,public.table2\"\n include-lsn:\ + \ \"true\"\n```\n" + additionalProperties: + type: string + slotMaxRetries: + type: integer + description: 'Default `6`. If connecting to a replication + slot fails, this is the maximum number of consecutive + attempts to connect. + + ' + slotRetryDelayMs: + type: integer + description: 'Default `10000` (10 seconds). The number + of milliseconds to wait between retry attempts when + the connector fails to connect to a replication slot. + + ' + unavailableValuePlaceholder: + type: string + description: 'Default `__debezium_unavailable_value`. 
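A sketch of the heartbeat-table workaround for low-traffic databases described above, combined with the replication slot retry settings; the table name comes from the example query and the other values are illustrative:

```yaml
debeziumProperties:
  heartbeatIntervalMs: 30000
  heartbeatActionQuery: "INSERT INTO test_heartbeat_table (text) VALUES ('test_heartbeat')"
  slotMaxRetries: 6          # consecutive attempts to reconnect to the replication slot
  slotRetryDelayMs: 10000    # wait 10 seconds between attempts
```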
+ Specifies the constant that the connector provides + to indicate that the original value is a toasted value + that is not provided by the database. If the setting + of unavailable.value.placeholder starts with the hex: + prefix it is expected that the rest of the string + represents hexadecimally encoded octets. For more + information, see [toasted values](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-toasted-values). + + ' + provideTransactionMetadata: + type: boolean + description: 'Default `false`. Determines whether the + connector generates events with transaction boundaries + and enriches change event envelopes with transaction + metadata. Specify true if you want the connector to + do this. For more information, see [Transaction metadata](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-transaction-metadata). + + ' + flushLsnSource: + type: boolean + description: 'Default `true`. Determines whether the + connector should commit the LSN of the processed records + in the source postgres database so that the WAL logs + can be deleted. Specify false if you don’t want the + connector to do this. Please note that if set to false + LSN will not be acknowledged by Debezium and as a + result WAL logs will not be cleared which might result + in disk space issues. User is expected to handle the + acknowledgement of LSN outside Debezium. + + ' + retriableRestartConnectorWaitMs: + type: integer + description: 'Default `10000` (10 seconds). The number + of milliseconds to wait before restarting a connector + after a retriable error occurs. + + ' + skippedOperations: + type: array + items: + type: string + description: 'Default `none`. A list of operation types + that will be skipped during streaming. The operations + include: c for inserts/create, u for updates, d for + deletes, t for truncates, and none to not skip any + operations. By default, no operations are skipped. + + ' + signalDataCollection: + type: string + description: 'Fully-qualified name of the data collection + that is used to send signals to the connector. Use + the following format to specify the collection name: + . + + ' + signalEnabledChannels: + type: array + items: + type: string + description: 'Default `[sgstream-annotations]`. List + of the signaling channel names that are enabled for + the connector. By default, the following channels + are available: sgstream-annotations, source, kafka, + file and jmx. Optionally, you can also implement a + [custom signaling channel](https://debezium.io/documentation/reference/stable/configuration/signalling.html#debezium-signaling-enabling-custom-signaling-channel). + + ' + notificationEnabledChannels: + type: array + items: + type: string + description: 'List of notification channel names that + are enabled for the connector. By default, the following + channels are available: sink, log and jmx. Optionally, + you can also implement a [custom notification channel](https://debezium.io/documentation/reference/stable/configuration/signalling.html#debezium-signaling-enabling-custom-signaling-channel). + + ' + incrementalSnapshotChunkSize: + type: integer + description: 'Default `1024`. The maximum number of + rows that the connector fetches and reads into memory + during an incremental snapshot chunk. Increasing the + chunk size provides greater efficiency, because the + snapshot runs fewer snapshot queries of a greater + size. 
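To show how the streaming filters and signaling channels above might be combined, a hedged sketch; the signaling table name is hypothetical:

```yaml
debeziumProperties:
  skippedOperations:
    - t                                            # skip truncate events
  signalDataCollection: public.debezium_signals    # hypothetical signaling table
  signalEnabledChannels:
    - sgstream-annotations
    - source
  notificationEnabledChannels:
    - log
```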
However, larger chunk sizes also require more + memory to buffer the snapshot data. Adjust the chunk + size to a value that provides the best performance + in your environment. + + ' + incrementalSnapshotWatermarkingStrategy: + type: string + description: 'Default `insert_insert`. Specifies the + watermarking mechanism that the connector uses during + an incremental snapshot to deduplicate events that + might be captured by an incremental snapshot and then + recaptured after streaming resumes. + + + You can specify one of the following options: + + + * `insert_insert`: When you send a signal to initiate + an incremental snapshot, for every chunk that Debezium + reads during the snapshot, it writes an entry to the + signaling data collection to record the signal to + open the snapshot window. After the snapshot completes, + Debezium inserts a second entry to record the closing + of the window. + + * `insert_delete`: When you send a signal to initiate + an incremental snapshot, for every chunk that Debezium + reads, it writes a single entry to the signaling data + collection to record the signal to open the snapshot + window. After the snapshot completes, this entry is + removed. No entry is created for the signal to close + the snapshot window. Set this option to prevent rapid + growth of the signaling data collection. + + ' + xminFetchIntervalMs: + type: integer + description: 'Default `0`. How often, in milliseconds, + the XMIN will be read from the replication slot. The + XMIN value provides the lower bounds of where a new + replication slot could start from. The default value + of 0 disables tracking XMIN tracking. + + ' + topicNamingStrategy: + type: string + description: 'Default `io.debezium.schema.SchemaTopicNamingStrategy`. + The name of the TopicNamingStrategy class that should + be used to determine the topic name for data change, + schema change, transaction, heartbeat event etc., + defaults to SchemaTopicNamingStrategy. + + ' + topicDelimiter: + type: string + description: 'Default `.`. Specify the delimiter for + topic name, defaults to ".". + + ' + topicCacheSize: + type: integer + description: 'Default `10000`. The size used for holding + the topic names in bounded concurrent hash map. This + cache will help to determine the topic name corresponding + to a given data collection. + + ' + topicHeartbeatPrefix: + type: string + description: 'Default `__debezium-heartbeat`. Controls + the name of the topic to which the connector sends + heartbeat messages. For example, if the topic prefix + is fulfillment, the default topic name is __debezium-heartbeat.fulfillment. + + ' + topicTransaction: + type: string + description: 'Default `transaction`. Controls the name + of the topic to which the connector sends transaction + metadata messages. For example, if the topic prefix + is fulfillment, the default topic name is fulfillment.transaction. + + ' + snapshotMaxThreads: + type: integer + description: 'Default `1`. Specifies the number of threads + that the connector uses when performing an initial + snapshot. To enable parallel initial snapshots, set + the property to a value greater than 1. In a parallel + initial snapshot, the connector processes multiple + tables concurrently. This feature is incubating. 
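For reference, a sketch that spells out the topic naming defaults listed above; setting them explicitly only restates the default behavior:

```yaml
debeziumProperties:
  topicNamingStrategy: io.debezium.schema.SchemaTopicNamingStrategy
  topicDelimiter: "."
  topicHeartbeatPrefix: __debezium-heartbeat
  topicTransaction: transaction
  snapshotMaxThreads: 1       # values > 1 enable parallel initial snapshots (incubating)
```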
+ + ' + customMetricTags: + type: object + additionalProperties: + type: string + description: "The custom metric tags will accept key-value\ + \ pairs to customize the MBean object name which should\ + \ be appended the end of regular name, each key would\ + \ represent a tag for the MBean object name, and the\ + \ corresponding value would be the value of that tag\ + \ the key is. For example:\n\n```\ncustomMetricTags:\n\ + \ k1: v1\n k2: v2\n```\n" + errorsMaxRetries: + type: integer + description: 'Default `-1`. Specifies how the connector + responds after an operation that results in a retriable + error, such as a connection error. + + + Set one of the following options: + + + * `-1`: No limit. The connector always restarts automatically, + and retries the operation, regardless of the number + of previous failures. + + * `0`: Disabled. The connector fails immediately, + and never retries the operation. User intervention + is required to restart the connector. + + * `> 0`: The connector restarts automatically until + it reaches the specified maximum number of retries. + After the next failure, the connector stops, and user + intervention is required to restart it. + + ' + postgres: + type: object + description: 'The configuration of the data source required + when type is `Postgres`. + + ' + required: + - host + properties: + host: + type: string + description: 'The hostname of the Postgres instance. + + ' + port: + type: integer + description: 'The port of the Postgres instance. When not + specified port 5432 will be used. + + ' + database: + type: string + description: 'The target database name to which the CDC + process will connect to. + + + If not specified the default postgres database will be + targeted. + + ' + username: + type: object + description: 'The username used by the CDC process to connect + to the database. + + + If not specified the default superuser username (by default + postgres) will be used. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The Secret name where the username is + stored. + + ' + key: + type: string + description: 'The Secret key where the username is stored. + + ' + password: + type: object + description: 'The password used by the CDC process to connect + to the database. + + + If not specified the default superuser password will be + used. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The Secret name where the password is + stored. + + ' + key: + type: string + description: 'The Secret key where the password is stored. + + ' + includes: + type: array + description: 'A list of regular expressions that allow to + match one or more `.
.` entries + to be filtered before sending to the target. + + + This property is mutually exclusive with `excludes`. + + ' + items: + type: string + description: 'A regular expressions that allow to match + one or more `.
.` entries to be + filtered before sending to the target. + + ' + excludes: + type: array + description: 'A list of regular expressions that allow to + match one or more `.
.` entries + to be filtered out before sending to the target. + + + This property is mutually exclusive with `includes`. + + ' + items: + type: string + description: 'A regular expressions that allow to match + one or more `.
.` entries to be + filtered out before sending to the target. + + ' + debeziumProperties: + type: object + description: 'Specific property of the debezium Postgres + connector. + + + See https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-connector-properties + + + Each property is converted from myPropertyName to my.property.name + + ' + properties: + pluginName: + type: string + description: 'Default `pgoutput`. The name of the [PostgreSQL + logical decoding plug-in](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-output-plugin) + installed on the PostgreSQL server. Supported values + are decoderbufs, and pgoutput. + + ' + slotName: + type: string + description: 'Default . (with all characters that are not `[a-zA-Z0-9]` + changed to `_` character). The name of the PostgreSQL + logical decoding slot that was created for streaming + changes from a particular plug-in for a particular + database/schema. The server uses this slot to stream + events to the Debezium connector that you are configuring. + + + Slot names must conform to [PostgreSQL replication + slot naming rules](https://www.postgresql.org/docs/current/static/warm-standby.html#STREAMING-REPLICATION-SLOTS-MANIPULATION), + which state: "Each replication slot has a name, which + can contain lower-case letters, numbers, and the underscore + character." + + ' + slotDropOnStop: + type: boolean + description: 'Default `true`. Whether or not to delete + the logical replication slot when the connector stops + in a graceful, expected way. The default behavior + is that the replication slot remains configured for + the connector when the connector stops. When the connector + restarts, having the same replication slot enables + the connector to start processing where it left off. + Set to true in only testing or development environments. + Dropping the slot allows the database to discard WAL + segments. When the connector restarts it performs + a new snapshot or it can continue from a persistent + offset in the Kafka Connect offsets topic. + + ' + publicationName: + type: string + description: 'Default . + (with all characters that are not `[a-zA-Z0-9]` changed + to `_` character). The name of the PostgreSQL publication + created for streaming changes when using pgoutput. + This publication is created at start-up if it does + not already exist and it includes all tables. Debezium + then applies its own include/exclude list filtering, + if configured, to limit the publication to change + events for the specific tables of interest. The connector + user must have superuser permissions to create this + publication, so it is usually preferable to create + the publication before starting the connector for + the first time. If the publication already exists, + either for all tables or configured with a subset + of tables, Debezium uses the publication as it is + defined. + + ' + skipMessagesWithoutChange: + type: boolean + description: 'Default `false`. Specifies whether to + skip publishing messages when there is no change in + included columns. This would essentially filter messages + if there is no change in columns included as per includes + or excludes fields. Note: Only works when REPLICA + IDENTITY of the table is set to FULL + + ' + timePrecisionMode: + type: string + description: 'Default `adaptive`. 
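Putting the Postgres source fields and the plug-in, slot, and publication properties together, a hedged sketch; the Secret names, the host, and the schema.table form of the `includes` entry are assumptions:

```yaml
# Hypothetical source section of an SGStream (names and values are examples).
postgres:
  host: db.example.com
  port: 5432
  database: appdb
  username:
    name: stream-credentials    # Secret name (assumed)
    key: username               # Secret key (assumed)
  password:
    name: stream-credentials
    key: password
  includes:
    - "public\\.orders.*"       # assumed schema.table regular expression
  debeziumProperties:
    pluginName: pgoutput
    slotName: my_stream_slot        # must follow PostgreSQL slot naming rules
    publicationName: my_stream_pub
    slotDropOnStop: false           # keep the slot so streaming can resume after a restart
```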
Time, date, and timestamps + can be represented with different kinds of precision: + + + * `adaptive`: captures the time and timestamp values + exactly as in the database using either millisecond, + microsecond, or nanosecond precision values based + on the database column’s type. + + * `adaptive_time_microseconds`: captures the date, + datetime and timestamp values exactly as in the database + using either millisecond, microsecond, or nanosecond + precision values based on the database column’s type. + An exception is TIME type fields, which are always + captured as microseconds. + + * `connect`: always represents time and timestamp + values by using Kafka Connect’s built-in representations + for Time, Date, and Timestamp, which use millisecond + precision regardless of the database columns'' precision. + For more information, see [temporal values](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-temporal-types). + + ' + decimalHandlingMode: + type: string + description: 'Default `precise`. Specifies how the connector + should handle values for DECIMAL and NUMERIC columns: + + + * `precise`: represents values by using java.math.BigDecimal + to represent values in binary form in change events. + + * `double`: represents values by using double values, + which might result in a loss of precision but which + is easier to use. + + * `string`: encodes values as formatted strings, which + are easy to consume but semantic information about + the real type is lost. For more information, see [Decimal + types](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-decimal-types). + + ' + hstoreHandlingMode: + type: string + description: 'Default `json`. Specifies how the connector + should handle values for hstore columns: + + + * `map`: represents values by using MAP. + + * `json`: represents values by using json string. + This setting encodes values as formatted strings such + as {"key" : "val"}. For more information, see [PostgreSQL + HSTORE type](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-hstore-type). + + ' + intervalHandlingMode: + type: string + description: "Default `numeric`. Specifies how the connector\ + \ should handle values for interval columns:\n\n *\ + \ `numeric`: represents intervals using approximate\ + \ number of microseconds.\n * `string`: represents\ + \ intervals exactly by using the string pattern representation\ + \ PYMDTHMS.\ + \ For example: P1Y2M3DT4H5M6.78S. For more information,\ + \ see [PostgreSQL basic types](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-basic-types).\n" + tombstonesOnDelete: + type: boolean + description: 'Default `true`. Controls whether a delete + event is followed by a tombstone event. + + + * `true` - a delete operation is represented by a + delete event and a subsequent tombstone event. + + * `false` - only a delete event is emitted. + + + After a source record is deleted, emitting a tombstone + event (the default behavior) allows Kafka to completely + delete all events that pertain to the key of the deleted + row in case [log compaction](https://kafka.apache.org/documentation/#compaction) + is enabled for the topic. + + ' + columnTruncateToLengthChars: + type: array + items: + type: string + description: "An optional, list of regular expressions\ + \ that match the fully-qualified names of character-based\ + \ columns. 
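A brief sketch of the value handling modes described above, using only values named in this schema:

```yaml
debeziumProperties:
  timePrecisionMode: adaptive
  decimalHandlingMode: string     # easiest to consume, but type information is lost
  hstoreHandlingMode: map
  intervalHandlingMode: numeric
  tombstonesOnDelete: true        # emit a tombstone event after each delete event
```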
Set this property if you want to truncate\ + \ the data in a set of columns when it exceeds the\ + \ number of characters specified by the length in\ + \ the property name. Set length to a positive integer\ + \ value, for example, column.truncate.to.20.chars.\n\ + \ The fully-qualified name of a column observes the\ + \ following format: ...\n\ + \ To match the name of a column, Debezium applies\ + \ the regular expression that you specify as an anchored\ + \ regular expression. That is, the specified expression\ + \ is matched against the entire name string of the\ + \ column; the expression does not match substrings\ + \ that might be present in a column name.\n You can\ + \ specify multiple properties with different lengths\ + \ in a single configuration.\n" + columnMaskWithLengthChars: + type: array + items: + type: string + description: "An optional, list of regular expressions\ + \ that match the fully-qualified names of character-based\ + \ columns. Set this property if you want the connector\ + \ to mask the values for a set of columns, for example,\ + \ if they contain sensitive data. Set length to a\ + \ positive integer to replace data in the specified\ + \ columns with the number of asterisk (*) characters\ + \ specified by the length in the property name. Set\ + \ length to 0 (zero) to replace data in the specified\ + \ columns with an empty string.\n The fully-qualified\ + \ name of a column observes the following format:\ + \ schemaName.tableName.columnName.\n To match the\ + \ name of a column, Debezium applies the regular expression\ + \ that you specify as an anchored regular expression.\ + \ That is, the specified expression is matched against\ + \ the entire name string of the column; the expression\ + \ does not match substrings that might be present\ + \ in a column name.\n You can specify multiple properties\ + \ with different lengths in a single configuration.\n" + columnMaskHash: + type: object + description: "An optional section, that allow to specify,\ + \ for an hash algorithm and a salt, a list of regular\ + \ expressions that match the fully-qualified names\ + \ of character-based columns. Fully-qualified names\ + \ for columns are of the form ...\n\ + \ To match the name of a column Debezium applies the\ + \ regular expression that you specify as an anchored\ + \ regular expression. That is, the specified expression\ + \ is matched against the entire name string of the\ + \ column; the expression does not match substrings\ + \ that might be present in a column name. In the resulting\ + \ change event record, the values for the specified\ + \ columns are replaced with pseudonyms.\n A pseudonym\ + \ consists of the hashed value that results from applying\ + \ the specified hashAlgorithm and salt. Based on the\ + \ hash function that is used, referential integrity\ + \ is maintained, while column values are replaced\ + \ with pseudonyms. Supported hash functions are described\ + \ in the [MessageDigest section](https://docs.oracle.com/javase/7/docs/technotes/guides/security/StandardNames.html#MessageDigest)\ + \ of the Java Cryptography Architecture Standard Algorithm\ + \ Name Documentation.\n In the following example,\ + \ CzQMA0cB5K is a randomly selected salt.\n columnMaskHash.SHA-256.CzQMA0cB5K=[inventory.orders.customerName,inventory.shipment.customerName]\n\ + \ If necessary, the pseudonym is automatically shortened\ + \ to the length of the column. 
The connector configuration\ + \ can include multiple properties that specify different\ + \ hash algorithms and salts.\n Depending on the hash\ + \ algorithm used, the salt selected, and the actual\ + \ data set, the resulting data set might not be completely\ + \ masked.\n" + additionalProperties: + type: object + description: The hash algorithm (e.g. SHA-256) type + and configuration. + additionalProperties: + type: object + description: The salt (e.g. CzQMA0cB5K) value and + configuration. + additionalProperties: + type: array + items: + type: string + description: The list of regular expressions that + match the fully-qualified names of character-based + columns (e.g. inventory.orders.customerName) + columnMaskHashV2: + type: object + description: "Similar to also columnMaskHash but using\ + \ hashing strategy version 2.\n Hashing strategy version\ + \ 2 should be used to ensure fidelity if the value\ + \ is being hashed in different places or systems.\n" + additionalProperties: + type: object + description: The hash algorithm (e.g. SHA-256) type + and configuration. + additionalProperties: + type: object + description: The salt (e.g. CzQMA0cB5K) value and + configuration. + additionalProperties: + type: array + items: + type: string + description: The list of regular expressions that + match the fully-qualified names of character-based + columns (e.g. inventory.orders.customerName) + columnPropagateSourceType: + type: array + items: + type: string + description: "Default `[.*]`. An optional, list of regular\ + \ expressions that match the fully-qualified names\ + \ of columns for which you want the connector to emit\ + \ extra parameters that represent column metadata.\ + \ When this property is set, the connector adds the\ + \ following fields to the schema of event records:\n\ + \n* `__debezium.source.column.type`\n* `__debezium.source.column.length`\n\ + * `__debezium.source.column.scale`\n\nThese parameters\ + \ propagate a column’s original type name and length\ + \ (for variable-width types), respectively.\n Enabling\ + \ the connector to emit this extra data can assist\ + \ in properly sizing specific numeric or character-based\ + \ columns in sink databases.\n The fully-qualified\ + \ name of a column observes one of the following formats:\ + \ databaseName.tableName.columnName, or databaseName.schemaName.tableName.columnName.\n\ + \ To match the name of a column, Debezium applies\ + \ the regular expression that you specify as an anchored\ + \ regular expression. That is, the specified expression\ + \ is matched against the entire name string of the\ + \ column; the expression does not match substrings\ + \ that might be present in a column name.\n" + datatypePropagateSourceType: + type: array + items: + type: string + description: "Default `[.*]`. An optional, list of regular\ + \ expressions that specify the fully-qualified names\ + \ of data types that are defined for columns in a\ + \ database. 
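The inline example above uses the flat Kafka property form; under this schema (`columnMaskHash` is an object keyed by hash algorithm, then by salt, mapping to a list of column regular expressions) the equivalent nested YAML would presumably look like the following, reusing the algorithm, salt, and column names from that example:

```yaml
debeziumProperties:
  columnMaskHash:
    SHA-256:                        # hash algorithm
      CzQMA0cB5K:                   # salt
        - inventory.orders.customerName
        - inventory.shipment.customerName
```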
When this property is set, for columns\ + \ with matching data types, the connector emits event\ + \ records that include the following extra fields\ + \ in their schema:\n\n* `__debezium.source.column.type`\n\ + * `__debezium.source.column.length`\n* `__debezium.source.column.scale`\n\ + \nThese parameters propagate a column’s original type\ + \ name and length (for variable-width types), respectively.\n\ + \ Enabling the connector to emit this extra data can\ + \ assist in properly sizing specific numeric or character-based\ + \ columns in sink databases.\n The fully-qualified\ + \ name of a column observes one of the following formats:\ + \ databaseName.tableName.typeName, or databaseName.schemaName.tableName.typeName.\n\ + \ To match the name of a data type, Debezium applies\ + \ the regular expression that you specify as an anchored\ + \ regular expression. That is, the specified expression\ + \ is matched against the entire name string of the\ + \ data type; the expression does not match substrings\ + \ that might be present in a type name.\n For the\ + \ list of PostgreSQL-specific data type names, see\ + \ the [PostgreSQL data type mappings](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-data-types).\n" + messageKeyColumns: + type: array + items: + type: string + description: "A list of expressions that specify the\ + \ columns that the connector uses to form custom message\ + \ keys for change event records that it publishes\ + \ to the Kafka topics for specified tables.\n By default,\ + \ Debezium uses the primary key column of a table\ + \ as the message key for records that it emits. In\ + \ place of the default, or to specify a key for tables\ + \ that lack a primary key, you can configure custom\ + \ message keys based on one or more columns.\n To\ + \ establish a custom message key for a table, list\ + \ the table, followed by the columns to use as the\ + \ message key. Each list entry takes the following\ + \ format:\n :,\n\ + \ To base a table key on multiple column names, insert\ + \ commas between the column names.\n Each fully-qualified\ + \ table name is a regular expression in the following\ + \ format:\n .\n The property\ + \ can include entries for multiple tables. Use a semicolon\ + \ to separate table entries in the list.\n The following\ + \ example sets the message key for the tables inventory.customers\ + \ and purchase.orders:\n inventory.customers:pk1,pk2;(.*).purchaseorders:pk3,pk4\n\ + \ For the table inventory.customer, the columns pk1\ + \ and pk2 are specified as the message key. For the\ + \ purchaseorders tables in any schema, the columns\ + \ pk3 and pk4 server as the message key.\n There is\ + \ no limit to the number of columns that you use to\ + \ create custom message keys. However, it’s best to\ + \ use the minimum number that are required to specify\ + \ a unique key.\n Note that having this property set\ + \ and REPLICA IDENTITY set to DEFAULT on the tables,\ + \ will cause the tombstone events to not be created\ + \ properly if the key columns are not part of the\ + \ primary key of the table. Setting REPLICA IDENTITY\ + \ to FULL is the only solution.\n" + publicationAutocreateMode: + type: string + description: 'Default `all_tables`. Applies only when + streaming changes by using [the pgoutput plug-in](https://www.postgresql.org/docs/current/sql-createpublication.html). 
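A sketch of `messageKeyColumns` that reuses the entries from the example above; listing one table entry per array item is an assumption about how the list maps to the underlying property:

```yaml
debeziumProperties:
  messageKeyColumns:
    - "inventory.customers:pk1,pk2"
    - "(.*).purchaseorders:pk3,pk4"
```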
+ The setting determines how creation of a [publication](https://www.postgresql.org/docs/current/logical-replication-publication.html) + should work. Specify one of the following values: + + + * `all_tables` - If a publication exists, the connector + uses it. If a publication does not exist, the connector + creates a publication for all tables in the database + for which the connector is capturing changes. For + the connector to create a publication it must access + the database through a database user account that + has permission to create publications and perform + replications. You grant the required permission by + using the following SQL command CREATE PUBLICATION + FOR ALL TABLES;. + + * `disabled` - The connector does not attempt to create + a publication. A database administrator or the user + configured to perform replications must have created + the publication before running the connector. If the + connector cannot find the publication, the connector + throws an exception and stops. + + * `filtered` - If a publication exists, the connector + uses it. If no publication exists, the connector creates + a new publication for tables that match the current + filter configuration as specified by the schema.include.list, + schema.exclude.list, and table.include.list, and table.exclude.list + connector configuration properties. For example: CREATE + PUBLICATION FOR TABLE . If the publication exists, the connector updates + the publication for tables that match the current + filter configuration. For example: ALTER PUBLICATION + SET TABLE . + + ' + replicaIdentityAutosetValues: + type: array + items: + type: string + description: "The setting determines the value for [replica\ + \ identity](https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-REPLICA-IDENTITY)\ + \ at table level.\n This option will overwrite the\ + \ existing value in database. A comma-separated list\ + \ of regular expressions that match fully-qualified\ + \ tables and replica identity value to be used in\ + \ the table.\n Each expression must match the pattern\ + \ ':',\ + \ where the table name could be defined as (SCHEMA_NAME.TABLE_NAME),\ + \ and the replica identity values are:\n DEFAULT\ + \ - Records the old values of the columns of the primary\ + \ key, if any. This is the default for non-system\ + \ tables.\n INDEX index_name - Records the old values\ + \ of the columns covered by the named index, that\ + \ must be unique, not partial, not deferrable, and\ + \ include only columns marked NOT NULL. If this index\ + \ is dropped, the behavior is the same as NOTHING.\n\ + \ FULL - Records the old values of all columns in\ + \ the row.\n NOTHING - Records no information about\ + \ the old row. This is the default for system tables.\n\ + \ For example,\n schema1.*:FULL,schema2.table2:NOTHING,schema2.table3:INDEX\ + \ idx_name\n" + binaryHandlingMode: + type: string + description: 'Default `bytes`. Specifies how binary + (bytea) columns should be represented in change events: + + + * `bytes` represents binary data as byte array. + + * `base64` represents binary data as base64-encoded + strings. + + * `base64-url-safe` represents binary data as base64-url-safe-encoded + strings. + + * `hex` represents binary data as hex-encoded (base16) + strings. + + ' + schemaNameAdjustmentMode: + type: string + description: 'Default `none`. Specifies how schema names + should be adjusted for compatibility with the message + converter used by the connector. 
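For illustration, the replica identity example from the description above expressed as list items, plus a binary handling choice; whether each expression goes in its own array item is an assumption:

```yaml
debeziumProperties:
  replicaIdentityAutosetValues:
    - "schema1.*:FULL"
    - "schema2.table2:NOTHING"
    - "schema2.table3:INDEX idx_name"
  binaryHandlingMode: base64      # one of bytes, base64, base64-url-safe, hex
```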
Possible settings: + + + * `none` does not apply any adjustment. + + * `avro` replaces the characters that cannot be used + in the Avro type name with underscore. + + * `avro_unicode` replaces the underscore or characters + that cannot be used in the Avro type name with corresponding + unicode like _uxxxx. Note: _ is an escape sequence + like backslash in Java + + ' + fieldNameAdjustmentMode: + type: string + description: 'Default `none`. Specifies how field names + should be adjusted for compatibility with the message + converter used by the connector. Possible settings: + + + * `none` does not apply any adjustment. + + * `avro` replaces the characters that cannot be used + in the Avro type name with underscore. + + * `avro_unicode` replaces the underscore or characters + that cannot be used in the Avro type name with corresponding + unicode like _uxxxx. Note: _ is an escape sequence + like backslash in Java + + + For more information, see [Avro naming](https://debezium.io/documentation/reference/stable/configuration/avro.html#avro-naming). + + ' + moneyFractionDigits: + type: integer + description: 'Default `2`. Specifies how many decimal + digits should be used when converting Postgres money + type to java.math.BigDecimal, which represents the + values in change events. Applicable only when decimalHandlingMode + is set to precise. + + ' + converters: + type: object + additionalProperties: + type: object + additionalProperties: + type: string + description: "Enumerates a comma-separated list of the\ + \ symbolic names of the [custom converter](https://debezium.io/documentation/reference/stable/development/converters.html#custom-converters)\ + \ instances that the connector can use. For example,\n\ + \n```\nisbn:\n type: io.debezium.test.IsbnConverter\n\ + \ schemaName: io.debezium.postgresql.type.Isbn\n\ + ```\n\nYou must set the converters property to enable\ + \ the connector to use a custom converter.\n For each\ + \ converter that you configure for a connector, you\ + \ must also add a .type property, which specifies\ + \ the fully-qualified name of the class that implements\ + \ the converter interface.\nIf you want to further\ + \ control the behavior of a configured converter,\ + \ you can add one or more configuration parameters\ + \ to pass values to the converter. To associate any\ + \ additional configuration parameter with a converter,\ + \ prefix the parameter names with the symbolic name\ + \ of the converter.\n Each property is converted from\ + \ myPropertyName to my.property.name\n" + snapshotMode: + type: string + description: "Default `initial`. Specifies the criteria\ + \ for performing a snapshot when the connector starts:\n\ + \n* `always` - The connector performs a snapshot every\ + \ time that it starts. The snapshot includes the structure\ + \ and data of the captured tables. Specify this value\ + \ to populate topics with a complete representation\ + \ of the data from the captured tables every time\ + \ that the connector starts. After the snapshot completes,\ + \ the connector begins to stream event records for\ + \ subsequent database changes.\n* `initial` - The\ + \ connector performs a snapshot only when no offsets\ + \ have been recorded for the logical server name.\n\ + * `initial_only` - The connector performs an initial\ + \ snapshot and then stops, without processing any\ + \ subsequent changes.\n* `no_data` - The connector\ + \ never performs snapshots. 
When a connector is configured\ + \ this way, after it starts, it behaves as follows:\ + \ If there is a previously stored LSN in the Kafka\ + \ offsets topic, the connector continues streaming\ + \ changes from that position. If no LSN is stored,\ + \ the connector starts streaming changes from the\ + \ point in time when the PostgreSQL logical replication\ + \ slot was created on the server. Use this snapshot\ + \ mode only when you know all data of interest is\ + \ still reflected in the WAL.\n* `never` - Deprecated\ + \ see no_data.\n* `when_needed` - After the connector\ + \ starts, it performs a snapshot only if it detects\ + \ one of the following circumstances: \n It cannot\ + \ detect any topic offsets.\n A previously recorded\ + \ offset specifies a log position that is not available\ + \ on the server.\n* `configuration_based` - With this\ + \ option, you control snapshot behavior through a\ + \ set of connector properties that have the prefix\ + \ 'snapshotModeConfigurationBased'.\n* `custom` -\ + \ The connector performs a snapshot according to the\ + \ implementation specified by the snapshotModeCustomName\ + \ property, which defines a custom implementation\ + \ of the io.debezium.spi.snapshot.Snapshotter interface.\n\ + \nFor more information, see the [table of snapshot.mode\ + \ options](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-connector-snapshot-mode-options).\n" + snapshotModeConfigurationBasedSnapshotData: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, set this property to specify + whether the connector includes table data when it + performs a snapshot. + + ' + snapshotModeConfigurationBasedSnapshotSchema: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, set this property to specify + whether the connector includes the table schema when + it performs a snapshot. + + ' + snapshotModeConfigurationBasedStartStream: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, set this property to specify + whether the connector begins to stream change events + after a snapshot completes. + + ' + snapshotModeConfigurationBasedSnapshotOnSchemaError: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, set this property to specify + whether the connector includes table schema in a snapshot + if the schema history topic is not available. + + ' + snapshotModeConfigurationBasedSnapshotOnDataError: + type: boolean + description: 'Default `false`. If the snapshotMode is + set to configuration_based, this property specifies + whether the connector attempts to snapshot table data + if it does not find the last committed offset in the + transaction log. Set the value to true to instruct + the connector to perform a new snapshot. + + ' + snapshotModeCustomName: + type: string + description: 'When snapshotMode is set as custom, use + this setting to specify the name of the custom implementation + provided in the name() method that is defined by the + ''io.debezium.spi.snapshot.Snapshotter'' interface. + The provided implementation is called after a connector + restart to determine whether to perform a snapshot. + For more information, see [custom snapshotter SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + snapshotLockingMode: + type: string + description: 'Default `none`. 
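A sketch of the `configuration_based` snapshot mode wired to its companion flags, all of which default to `false` per the descriptions above; the values shown are only one plausible combination:

```yaml
debeziumProperties:
  snapshotMode: configuration_based
  snapshotModeConfigurationBasedSnapshotSchema: true
  snapshotModeConfigurationBasedSnapshotData: true
  snapshotModeConfigurationBasedStartStream: true
  snapshotModeConfigurationBasedSnapshotOnSchemaError: false
  snapshotModeConfigurationBasedSnapshotOnDataError: false
```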
Specifies how the connector + holds locks on tables while performing a schema snapshot. + Set one of the following options: + + + * `shared`: The connector holds a table lock that + prevents exclusive table access during the initial + portion phase of the snapshot in which database schemas + and other metadata are read. After the initial phase, + the snapshot no longer requires table locks. + + * `none`: The connector avoids locks entirely. Do + not use this mode if schema changes might occur during + the snapshot. + + + > *WARNING*: Do not use this mode if schema changes + might occur during the snapshot. + + + * `custom`: The connector performs a snapshot according + to the implementation specified by the snapshotLockingModeCustomName + property, which is a custom implementation of the + io.debezium.spi.snapshot.SnapshotLock interface. + + ' + snapshotLockingModeCustomName: + type: string + description: 'When snapshotLockingMode is set to custom, + use this setting to specify the name of the custom + implementation provided in the name() method that + is defined by the ''io.debezium.spi.snapshot.SnapshotLock'' + interface. For more information, see [custom snapshotter + SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + snapshotQueryMode: + type: string + description: 'Default `select_all`. Specifies how the + connector queries data while performing a snapshot. + Set one of the following options: + + + * `select_all`: The connector performs a select all + query by default, optionally adjusting the columns + selected based on the column include and exclude list + configurations. + + * `custom`: The connector performs a snapshot query + according to the implementation specified by the snapshotQueryModeCustomName + property, which defines a custom implementation of + the io.debezium.spi.snapshot.SnapshotQuery interface. + This setting enables you to manage snapshot content + in a more flexible manner compared to using the snapshotSelectStatementOverrides + property. + + ' + snapshotQueryModeCustomName: + type: string + description: 'When snapshotQueryMode is set as custom, + use this setting to specify the name of the custom + implementation provided in the name() method that + is defined by the ''io.debezium.spi.snapshot.SnapshotQuery'' + interface. For more information, see [custom snapshotter + SPI](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#connector-custom-snapshot). + + ' + snapshotIncludeCollectionList: + type: array + items: + type: string + description: "Default . An optional, list of regular\ + \ expressions that match the fully-qualified names\ + \ (.) of the tables to include\ + \ in a snapshot. The specified items must be named\ + \ in the connector’s table.include.list property.\ + \ This property takes effect only if the connector’s\ + \ snapshotMode property is set to a value other than\ + \ `never`. This property does not affect the behavior\ + \ of incremental snapshots.\n To match the name\ + \ of a table, Debezium applies the regular expression\ + \ that you specify as an anchored regular expression.\ + \ That is, the specified expression is matched against\ + \ the entire name string of the table; it does not\ + \ match substrings that might be present in a table\ + \ name.\n" + snapshotLockTimeoutMs: + type: integer + description: 'Default `10000`. 
Positive integer value + that specifies the maximum amount of time (in milliseconds) + to wait to obtain table locks when performing a snapshot. + If the connector cannot acquire table locks in this + time interval, the snapshot fails. [How the connector + performs snapshots](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-snapshots) + provides details. + + ' + snapshotSelectStatementOverrides: + type: object + additionalProperties: + type: string + description: "Specifies the table rows to include in\ + \ a snapshot. Use the property if you want a snapshot\ + \ to include only a subset of the rows in a table.\ + \ This property affects snapshots only. It does not\ + \ apply to events that the connector reads from the\ + \ log.\n The property contains a hierarchy of fully-qualified\ + \ table names in the form ..\ + \ For example,\n\n```\nsnapshotSelectStatementOverrides:\ + \ \n \"customers.orders\": \"SELECT * FROM [customers].[orders]\ + \ WHERE delete_flag = 0 ORDER BY id DESC\"\n```\n\n\ + In the resulting snapshot, the connector includes\ + \ only the records for which delete_flag = 0.\n" + eventProcessingFailureHandlingMode: + type: string + description: 'Default `fail`. Specifies how the connector + should react to exceptions during processing of events: + + + * `fail`: propagates the exception, indicates the + offset of the problematic event, and causes the connector + to stop. + + * `warn`: logs the offset of the problematic event, + skips that event, and continues processing. + + * `skip`: skips the problematic event and continues + processing. + + ' + maxBatchSize: + type: integer + description: 'Default `2048`. Positive integer value + that specifies the maximum size of each batch of events + that the connector processes. + + ' + maxQueueSize: + type: integer + description: 'Default `8192`. Positive integer value + that specifies the maximum number of records that + the blocking queue can hold. When Debezium reads events + streamed from the database, it places the events in + the blocking queue before it writes them to Kafka. + The blocking queue can provide backpressure for reading + change events from the database in cases where the + connector ingests messages faster than it can write + them to Kafka, or when Kafka becomes unavailable. + Events that are held in the queue are disregarded + when the connector periodically records offsets. Always + set the value of maxQueueSize to be larger than the + value of maxBatchSize. + + ' + maxQueueSizeInBytes: + type: integer + description: "Default `0`. A long integer value that\ + \ specifies the maximum volume of the blocking queue\ + \ in bytes. By default, volume limits are not specified\ + \ for the blocking queue. To specify the number of\ + \ bytes that the queue can consume, set this property\ + \ to a positive long value.\n If maxQueueSize is also\ + \ set, writing to the queue is blocked when the size\ + \ of the queue reaches the limit specified by either\ + \ property. For example, if you set maxQueueSize=1000,\ + \ and maxQueueSizeInBytes=5000, writing to the queue\ + \ is blocked after the queue contains 1000 records,\ + \ or after the volume of the records in the queue\ + \ reaches 5000 bytes.\n" + pollIntervalMs: + type: integer + description: 'Default `500`. Positive integer value + that specifies the number of milliseconds the connector + should wait for new change events to appear before + it starts processing a batch of events. Defaults to + 500 milliseconds. 
+ + ' + includeUnknownDatatypes: + type: boolean + description: "Default `true`. Specifies connector behavior\ + \ when the connector encounters a field whose data\ + \ type is unknown. The default behavior is that the\ + \ connector omits the field from the change event\ + \ and logs a warning.\n Set this property to true\ + \ if you want the change event to contain an opaque\ + \ binary representation of the field. This lets consumers\ + \ decode the field. You can control the exact representation\ + \ by setting the binaryHandlingMode property.\n> *NOTE*:\ + \ Consumers risk backward compatibility issues when\ + \ `includeUnknownDatatypes` is set to `true`. Not\ + \ only may the database-specific binary representation\ + \ change between releases, but if the data type is\ + \ eventually supported by Debezium, the data type\ + \ will be sent downstream in a logical type, which\ + \ would require adjustments by consumers. In general,\ + \ when encountering unsupported data types, create\ + \ a feature request so that support can be added.\n" + databaseInitialStatements: + type: array + items: + type: string + description: "A list of SQL statements that the connector\ + \ executes when it establishes a JDBC connection to\ + \ the database.\n The connector may establish JDBC\ + \ connections at its own discretion. Consequently,\ + \ this property is useful for configuration of session\ + \ parameters only, and not for executing DML statements.\n\ + \ The connector does not execute these statements\ + \ when it creates a connection for reading the transaction\ + \ log.\n" + statusUpdateIntervalMs: + type: integer + description: 'Default `10000`. Frequency for sending + replication connection status updates to the server, + given in milliseconds. The property also controls + how frequently the database status is checked to detect + a dead connection in case the database was shut down. + + ' + heartbeatIntervalMs: + type: integer + description: "Default `0`. Controls how frequently the\ + \ connector sends heartbeat messages to a Kafka topic.\ + \ The default behavior is that the connector does\ + \ not send heartbeat messages.\n Heartbeat messages\ + \ are useful for monitoring whether the connector\ + \ is receiving change events from the database. Heartbeat\ + \ messages might help decrease the number of change\ + \ events that need to be re-sent when a connector\ + \ restarts. To send heartbeat messages, set this property\ + \ to a positive integer, which indicates the number\ + \ of milliseconds between heartbeat messages.\n Heartbeat\ + \ messages are needed when there are many updates\ + \ in a database that is being tracked but only a tiny\ + \ number of updates are related to the table(s) and\ + \ schema(s) for which the connector is capturing changes.\ + \ In this situation, the connector reads from the\ + \ database transaction log as usual but rarely emits\ + \ change records to Kafka. This means that no offset\ + \ updates are committed to Kafka and the connector\ + \ does not have an opportunity to send the latest\ + \ retrieved LSN to the database. The database retains\ + \ WAL files that contain events that have already\ + \ been processed by the connector. 
Sending heartbeat\ + \ messages enables the connector to send the latest\ + \ retrieved LSN to the database, which allows the\ + \ database to reclaim disk space being used by no\ + \ longer needed WAL files.\n" + heartbeatActionQuery: + type: string + description: "Specifies a query that the connector executes\ + \ on the source database when the connector sends\ + \ a heartbeat message.\n This is useful for resolving\ + \ the situation described in [WAL disk space consumption](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-wal-disk-space),\ + \ where capturing changes from a low-traffic database\ + \ on the same host as a high-traffic database prevents\ + \ Debezium from processing WAL records and thus acknowledging\ + \ WAL positions with the database. To address this\ + \ situation, create a heartbeat table in the low-traffic\ + \ database, and set this property to a statement that\ + \ inserts records into that table, for example:\n\n\ + \ ```\n INSERT INTO test_heartbeat_table (text) VALUES\ + \ ('test_heartbeat')\n ```\n \n This allows the connector\ + \ to receive changes from the low-traffic database\ + \ and acknowledge their LSNs, which prevents unbounded\ + \ WAL growth on the database host.\n" + schemaRefreshMode: + type: string + description: 'Default `columns_diff`. Specify the conditions + that trigger a refresh of the in-memory schema for + a table. + + + * `columns_diff`: is the safest mode. It ensures that + the in-memory schema stays in sync with the database + table’s schema at all times. + + * `columns_diff_exclude_unchanged_toast`: instructs + the connector to refresh the in-memory schema cache + if there is a discrepancy with the schema derived + from the incoming message, unless unchanged TOASTable + data fully accounts for the discrepancy. + + + This setting can significantly improve connector performance + if there are frequently-updated tables that have TOASTed + data that are rarely part of updates. However, it + is possible for the in-memory schema to become outdated + if TOASTable columns are dropped from the table. + + ' + snapshotDelayMs: + type: integer + description: 'An interval in milliseconds that the connector + should wait before performing a snapshot when the + connector starts. If you are starting multiple connectors + in a cluster, this property is useful for avoiding + snapshot interruptions, which might cause re-balancing + of connectors. + + ' + snapshotFetchSize: + type: integer + description: 'Default `10240`. During a snapshot, the + connector reads table content in batches of rows. + This property specifies the maximum number of rows + in a batch. + + ' + slotStreamParams: + type: object + description: "Parameters to pass to the configured logical\ + \ decoding plug-in. For example:\n\n```\nslotStreamParams:\n\ + \ add-tables: \"public.table,public.table2\"\n include-lsn:\ + \ \"true\"\n```\n" + additionalProperties: + type: string + slotMaxRetries: + type: integer + description: 'Default `6`. If connecting to a replication + slot fails, this is the maximum number of consecutive + attempts to connect. + + ' + slotRetryDelayMs: + type: integer + description: 'Default `10000` (10 seconds). The number + of milliseconds to wait between retry attempts when + the connector fails to connect to a replication slot. + + ' + unavailableValuePlaceholder: + type: string + description: 'Default `__debezium_unavailable_value`. 
+ Specifies the constant that the connector provides + to indicate that the original value is a toasted value + that is not provided by the database. If the setting + of unavailable.value.placeholder starts with the hex: + prefix it is expected that the rest of the string + represents hexadecimally encoded octets. For more + information, see [toasted values](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-toasted-values). + + ' + provideTransactionMetadata: + type: boolean + description: 'Default `false`. Determines whether the + connector generates events with transaction boundaries + and enriches change event envelopes with transaction + metadata. Specify true if you want the connector to + do this. For more information, see [Transaction metadata](https://debezium.io/documentation/reference/stable/connectors/postgresql.html#postgresql-transaction-metadata). + + ' + flushLsnSource: + type: boolean + description: 'Default `true`. Determines whether the + connector should commit the LSN of the processed records + in the source postgres database so that the WAL logs + can be deleted. Specify false if you don’t want the + connector to do this. Please note that if set to false + LSN will not be acknowledged by Debezium and as a + result WAL logs will not be cleared which might result + in disk space issues. User is expected to handle the + acknowledgement of LSN outside Debezium. + + ' + retriableRestartConnectorWaitMs: + type: integer + description: 'Default `10000` (10 seconds). The number + of milliseconds to wait before restarting a connector + after a retriable error occurs. + + ' + skippedOperations: + type: array + items: + type: string + description: 'Default `none`. A list of operation types + that will be skipped during streaming. The operations + include: c for inserts/create, u for updates, d for + deletes, t for truncates, and none to not skip any + operations. By default, no operations are skipped. + + ' + signalDataCollection: + type: string + description: 'Fully-qualified name of the data collection + that is used to send signals to the connector. Use + the following format to specify the collection name: + . + + ' + signalEnabledChannels: + type: array + items: + type: string + description: 'Default `[sgstream-annotations]`. List + of the signaling channel names that are enabled for + the connector. By default, the following channels + are available: sgstream-annotations, source, kafka, + file and jmx. Optionally, you can also implement a + [custom signaling channel](https://debezium.io/documentation/reference/stable/configuration/signalling.html#debezium-signaling-enabling-custom-signaling-channel). + + ' + notificationEnabledChannels: + type: array + items: + type: string + description: 'List of notification channel names that + are enabled for the connector. By default, the following + channels are available: sink, log and jmx. Optionally, + you can also implement a [custom notification channel](https://debezium.io/documentation/reference/stable/configuration/signalling.html#debezium-signaling-enabling-custom-signaling-channel). + + ' + incrementalSnapshotChunkSize: + type: integer + description: 'Default `1024`. The maximum number of + rows that the connector fetches and reads into memory + during an incremental snapshot chunk. Increasing the + chunk size provides greater efficiency, because the + snapshot runs fewer snapshot queries of a greater + size. 
However, larger chunk sizes also require more
+ memory to buffer the snapshot data. Adjust the chunk
+ size to a value that provides the best performance
+ in your environment.
+
+ '
+ incrementalSnapshotWatermarkingStrategy:
+ type: string
+ description: 'Default `insert_insert`. Specifies the
+ watermarking mechanism that the connector uses during
+ an incremental snapshot to deduplicate events that
+ might be captured by an incremental snapshot and then
+ recaptured after streaming resumes.
+
+
+ You can specify one of the following options:
+
+
+ * `insert_insert`: When you send a signal to initiate
+ an incremental snapshot, for every chunk that Debezium
+ reads during the snapshot, it writes an entry to the
+ signaling data collection to record the signal to
+ open the snapshot window. After the snapshot completes,
+ Debezium inserts a second entry to record the closing
+ of the window.
+
+ * `insert_delete`: When you send a signal to initiate
+ an incremental snapshot, for every chunk that Debezium
+ reads, it writes a single entry to the signaling data
+ collection to record the signal to open the snapshot
+ window. After the snapshot completes, this entry is
+ removed. No entry is created for the signal to close
+ the snapshot window. Set this option to prevent rapid
+ growth of the signaling data collection.
+
+ '
+ xminFetchIntervalMs:
+ type: integer
+ description: 'Default `0`. How often, in milliseconds,
+ the XMIN will be read from the replication slot. The
+ XMIN value provides the lower bounds of where a new
+ replication slot could start from. The default value
+ of 0 disables XMIN tracking.
+
+ '
+ topicNamingStrategy:
+ type: string
+ description: 'Default `io.debezium.schema.SchemaTopicNamingStrategy`.
+ The name of the TopicNamingStrategy class that should
+ be used to determine the topic name for data change,
+ schema change, transaction, heartbeat events, etc.
+
+ '
+ topicDelimiter:
+ type: string
+ description: 'Default `.`. Specifies the delimiter for
+ topic names.
+
+ '
+ topicCacheSize:
+ type: integer
+ description: 'Default `10000`. The size of the bounded
+ concurrent hash map used for holding topic names. This
+ cache helps to determine the topic name corresponding
+ to a given data collection.
+
+ '
+ topicHeartbeatPrefix:
+ type: string
+ description: 'Default `__debezium-heartbeat`. Controls
+ the name of the topic to which the connector sends
+ heartbeat messages. For example, if the topic prefix
+ is fulfillment, the default topic name is __debezium-heartbeat.fulfillment.
+
+ '
+ topicTransaction:
+ type: string
+ description: 'Default `transaction`. Controls the name
+ of the topic to which the connector sends transaction
+ metadata messages. For example, if the topic prefix
+ is fulfillment, the default topic name is fulfillment.transaction.
+
+ '
+ snapshotMaxThreads:
+ type: integer
+ description: 'Default `1`. Specifies the number of threads
+ that the connector uses when performing an initial
+ snapshot. To enable parallel initial snapshots, set
+ the property to a value greater than 1. In a parallel
+ initial snapshot, the connector processes multiple
+ tables concurrently. This feature is incubating.
+
+ '
+ customMetricTags:
+ type: object
+ additionalProperties:
+ type: string
+ description: "The custom metric tags accept key-value\
+ \ pairs to customize the MBean object name, appended to\
+ \ the end of the regular name: each key represents a\
+ \ tag for the MBean object name, and the corresponding\
+ \ value is the value of that tag. For example:\n\n```\ncustomMetricTags:\n\
+ \ k1: v1\n k2: v2\n```\n"
+ errorsMaxRetries:
+ type: integer
+ description: 'Default `-1`. Specifies how the connector
+ responds after an operation that results in a retriable
+ error, such as a connection error.
+
+
+ Set one of the following options:
+
+
+ * `-1`: No limit. The connector always restarts automatically,
+ and retries the operation, regardless of the number
+ of previous failures.
+
+ * `0`: Disabled. The connector fails immediately,
+ and never retries the operation. User intervention
+ is required to restart the connector.
+
+ * `> 0`: The connector restarts automatically until
+ it reaches the specified maximum number of retries.
+ After the next failure, the connector stops, and user
+ intervention is required to restart it.
+
+ '
+ target:
+ type: object
+ description: 'The target of this stream.
+
+ '
+ required:
+ - type
+ properties:
+ type:
+ type: string
+ description: 'Indicates the type of target of this stream. Possible
+ values are:
+
+
+ * `CloudEvent`: events will be sent to a cloud event receiver.
+
+ * `PgLambda`: events will trigger the execution of a lambda
+ script by integrating with [Knative Service](https://knative.dev/docs/serving/)
+ (Knative must already be installed).
+
+ * `SGCluster`: events will be written to an SGCluster, allowing
+ migration of data.
+
+ '
+ cloudEvent:
+ type: object
+ description: 'Configuration section for `CloudEvent` target
+ type.
+
+ '
+ properties:
+ format:
+ type: string
+ description: 'The CloudEvent format (json by default).
+
+
+ Only json is supported at the moment.
+
+ '
+ binding:
+ type: string
+ description: 'The CloudEvent binding (http by default).
+
+
+ Only http is supported at the moment.
+
+ '
+ http:
+ type: object
+ description: The http binding configuration.
+ required:
+ - url
+ properties:
+ url:
+ type: string
+ description: The URL used to send the CloudEvents to
+ the endpoint.
+ headers:
+ type: object
+ description: Headers to include when sending CloudEvents
+ to the endpoint.
+ additionalProperties:
+ type: string
+ connectTimeout:
+ type: string
+ description: 'Set the connect timeout.
+
+
+ Value 0 represents infinity (default). Negative values
+ are not allowed.
+
+ '
+ readTimeout:
+ type: string
+ description: 'Set the read timeout. The value is the
+ timeout to read a response.
+
+
+ Value 0 represents infinity (default). Negative values
+ are not allowed.
+
+ '
+ retryLimit:
+ type: integer
+ description: 'Set the retry limit. When set, the event
+ will be sent again after an error, up to the specified
+ limit of times. When not set, the event will be sent
+ again after an error.
+
+ '
+ retryBackoffDelay:
+ type: integer
+ description: 'The maximum amount of delay in seconds
+ after an error before retrying again.
+
+
+ The initial delay will use 10% of this value and then
+ increase the value exponentially up to the maximum
+ amount of seconds specified with this field.
+
+ '
+ default: 60
+ skipHostnameVerification:
+ type: boolean
+ description: When `true`, disables hostname verification.
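+ # Illustrative sketch (not part of the generated schema): how the `CloudEvent`
+ # target fields defined above might be combined in an SGStream spec. The
+ # receiver URL and the header value are placeholder assumptions.
+ #
+ #   target:
+ #     type: CloudEvent
+ #     cloudEvent:
+ #       format: json
+ #       binding: http
+ #       http:
+ #         url: http://event-receiver.my-namespace.svc:8080/events
+ #         headers:
+ #           X-Source: sgstream
+ #         retryLimit: 5
+ #         retryBackoffDelay: 60
+ #         skipHostnameVerification: false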
+ pgLambda: + type: object + description: 'Configuration section for `PgLambda` target type. + + ' + properties: + scriptType: + type: string + description: "The PgLambda script format (javascript by\ + \ default).\n\n* `javascript`: the script will receive\ + \ the following variable:\n * `request`: the HTTP request\ + \ object. See https://nodejs.org/docs/latest-v18.x/api/http.html#class-httpclientrequest\n\ + \ * `response`: the HTTP response object. See https://nodejs.org/docs/latest-v18.x/api/http.html#class-httpserverresponse\n\ + \ * `event`: the CloudEvent event object. See https://github.com/cloudevents/sdk-javascript\n" + script: + type: string + description: 'Script to execute. This field is mutually + exclusive with `scriptFrom` field. + + ' + scriptFrom: + type: object + description: 'Reference to either a Kubernetes [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) + or a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + that contains the script to execute. This field is mutually + exclusive with `script` field. + + + Fields `secretKeyRef` and `configMapKeyRef` are mutually + exclusive, and one of them is required. + + ' + properties: + secretKeyRef: + type: object + description: 'A Kubernetes [SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#secretkeyselector-v1-core) + that contains the script to execute. This field is + mutually exclusive with `configMapKeyRef` field. + + ' + properties: + name: + type: string + description: Name of the referent. [More information](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + key: + type: string + description: The key of the secret to select from. + Must be a valid secret key. + configMapKeyRef: + type: object + description: 'A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) + reference that contains the script to execute. This + field is mutually exclusive with `secretKeyRef` field. + + ' + properties: + name: + type: string + description: 'The name of the ConfigMap that contains + the script to execute. + + ' + key: + type: string + description: 'The key name within the ConfigMap + that contains the script to execute. + + ' + knative: + type: object + description: Knative Service configuration. + properties: + annotations: + type: object + description: Annotations to set to Knative Service + additionalProperties: + type: string + labels: + type: object + description: Labels to set to Knative Service + additionalProperties: + type: string + http: + type: object + description: PgLambda uses a CloudEvent http binding + to send events to the Knative Service. This section + allow to modify the configuration of this binding. + properties: + url: + type: string + description: The URL used to send the CloudEvents + to the endpoint. + headers: + type: object + description: Headers to include when sending CloudEvents + to the endpoint. + additionalProperties: + type: string + connectTimeout: + type: string + description: 'Set the connect timeout. + + + Value 0 represents infinity (default). Negative + values are not allowed. + + ' + readTimeout: + type: string + description: 'Set the read timeout. The value is + the timeout to read a response. + + + Value 0 represents infinity (default). Negative + values are not allowed. + + ' + retryLimit: + type: integer + description: 'Set the retry limit. When set the + event will be sent again after an error for the + specified limit of times. 
When not set the event + will be sent again after an error. + + ' + retryBackoffDelay: + type: integer + description: 'The maximum amount of delay in seconds + after an error before retrying again. + + + The initial delay will use 10% of this value and + then increase the value exponentially up to the + maximum amount of seconds specified with this + field. + + ' + default: 60 + skipHostnameVerification: + type: boolean + description: When `true` disable hostname verification. + sgCluster: + type: object + description: 'The configuration of the data target required + when type is `SGCluster`. + + ' + required: + - name + properties: + name: + type: string + description: 'The target SGCluster name. + + ' + database: + type: string + description: 'The target database name to which the data + will be migrated to. + + + If not specified the default postgres database will be + targeted. + + ' + username: + type: object + description: 'The username used by the CDC sink process + to connect to the database. + + + If not specified the default superuser username (by default + postgres) will be used. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The Secret name where the username is + stored. + + ' + key: + type: string + description: 'The Secret key where the username is stored. + + ' + password: + type: object + description: 'The password used by the CDC sink process + to connect to the database. + + + If not specified the default superuser password will be + used. + + ' + required: + - name + - key + properties: + name: + type: string + description: 'The Secret name where the password is + stored. + + ' + key: + type: string + description: 'The Secret key where the password is stored. + + ' + skipDdlImport: + type: boolean + description: 'When `true` disable import of DDL and tables + will be created on demand by Debezium. + + ' + ddlImportRoleSkipFilter: + type: string + description: 'Allow to set a [SIMILAR TO regular expression](https://www.postgresql.org/docs/current/functions-matching.html#FUNCTIONS-SIMILARTO-REGEXP) + to match the names of the roles to skip during import + of DDL. + + + When not set and source is an SGCluster will match the + superuser, replicator and authenticator usernames. + + ' + debeziumProperties: + type: object + description: 'Specific property of the debezium JDBC sink. + + + See https://debezium.io/documentation/reference/stable/connectors/jdbc.html#jdbc-connector-configuration + + + Each property is converted from myPropertyName to my.property.name + + ' + properties: + connectionPoolMin_size: + type: integer + description: 'Default `5`. Specifies the minimum number + of connections in the pool. + + ' + connectionPoolMax_size: + type: integer + description: 'Default `32`. Specifies the maximum number + of concurrent connections that the pool maintains. + + ' + connectionPoolAcquire_increment: + type: integer + description: 'Default `32`. Specifies the number of + connections that the connector attempts to acquire + if the connection pool exceeds its maximum size. + + ' + connectionPoolTimeout: + type: integer + description: 'Default `1800`. Specifies the number of + seconds that an unused connection is kept before it + is discarded. + + ' + databaseTime_zone: + type: string + description: 'Default `UTC`. Specifies the timezone + used when inserting JDBC temporal values. + + ' + deleteEnabled: + type: boolean + description: 'Default `true`. 
Specifies whether the + connector processes DELETE or tombstone events and + removes the corresponding row from the database. Use + of this option requires that you set the `primaryKeyMode` + to `record_key`. + + ' + truncateEnabled: + type: boolean + description: 'Default `true`. Specifies whether the + connector processes TRUNCATE events and truncates + the corresponding tables from the database. + + Although support for TRUNCATE statements has been + available in Db2 since version 9.7, currently, the + JDBC connector is unable to process standard TRUNCATE + events that the Db2 connector emits. + + To ensure that the JDBC connector can process TRUNCATE + events received from Db2, perform the truncation by + using an alternative to the standard TRUNCATE TABLE + statement. For example: + + + ``` + + ALTER TABLE ACTIVATE NOT LOGGED INITIALLY + WITH EMPTY TABLE + + ``` + + + The user account that submits the preceding query + requires ALTER privileges on the table to be truncated. + + ' + insertMode: + type: string + description: 'Default `upsert`. Specifies the strategy + used to insert events into the database. The following + options are available: + + * `insert`: Specifies that all events should construct + INSERT-based SQL statements. Use this option only + when no primary key is used, or when you can be certain + that no updates can occur to rows with existing primary + key values. + + * `update`: Specifies that all events should construct + UPDATE-based SQL statements. Use this option only + when you can be certain that the connector receives + only events that apply to existing rows. + + * `upsert`: Specifies that the connector adds events + to the table using upsert semantics. That is, if the + primary key does not exist, the connector performs + an INSERT operation, and if the key does exist, the + connector performs an UPDATE operation. When idempotent + writes are required, the connector should be configured + to use this option. + + ' + primaryKeyMode: + type: string + description: 'Default `record_key`. Specifies how the + connector resolves the primary key columns from the + event. + + * `none`: Specifies that no primary key columns are + created. + + * `record_key`: Specifies that the primary key columns + are sourced from the event’s record key. If the record + key is a primitive type, the `primaryKeyFields` property + is required to specify the name of the primary key + column. If the record key is a struct type, the `primaryKeyFields` + property is optional, and can be used to specify a + subset of columns from the event’s key as the table’s + primary key. + + * `record_value`: Specifies that the primary key columns + is sourced from the event’s value. You can set the + `primaryKeyFields` property to define the primary + key as a subset of fields from the event’s value; + otherwise all fields are used by default. + + ' + primaryKeyFields: + type: array + description: 'Either the name of the primary key column + or a comma-separated list of fields to derive the + primary key from. + + When `primaryKeyMode` is set to `record_key` and the + event’s key is a primitive type, it is expected that + this property specifies the column name to be used + for the key. + + When the `primaryKeyMode` is set to `record_key` with + a non-primitive key, or record_value, it is expected + that this property specifies a comma-separated list + of field names from either the key or value. 
If the
+ `primaryKeyMode` is set to `record_key` with a non-primitive
+ key, or `record_value`, and this property is not specified,
+ the connector derives the primary key from all fields
+ of either the record key or record value, depending
+ on the specified mode.
+
+ '
+ items:
+ type: string
+ quoteIdentifiers:
+ type: boolean
+ description: 'Default `true`. Specifies whether generated
+ SQL statements use quotation marks to delimit table
+ and column names. See the Quoting and case sensitivity
+ section of the Debezium JDBC connector documentation
+ for more details.
+
+ '
+ schemaEvolution:
+ type: string
+ description: 'Default `basic`. Specifies how the connector
+ evolves the destination table schemas. For more information,
+ see Schema evolution. The following options are available:
+
+ `none`: Specifies that the connector does not evolve
+ the destination schema.
+
+ `basic`: Specifies that basic evolution occurs. The
+ connector adds missing columns to the table by comparing
+ the incoming event’s record schema to the database
+ table structure.
+
+ '
+ tableNameFormat:
+ type: string
+ description: 'Default `${original}`. Specifies a string
+ that determines how the destination table name is
+ formatted, based on the topic name of the event. The
+ placeholder ${original} is replaced with the schema
+ name and the table name separated by a dot character
+ (`.`).
+
+ '
+ dialectPostgresPostgisSchema:
+ type: string
+ description: 'Default `public`. Specifies the schema
+ name where the PostgreSQL PostGIS extension is installed.
+ The default is `public`; however, if the PostGIS extension
+ was installed in another schema, this property should
+ be used to specify the alternate schema name.
+
+ '
+ dialectSqlserverIdentityInsert:
+ type: boolean
+ description: 'Default `false`. Specifies whether the
+ connector automatically sets an IDENTITY_INSERT before
+ an INSERT or UPSERT operation into the identity column
+ of SQL Server tables, and then unsets it immediately
+ after the operation. When the default setting (`false`)
+ is in effect, an INSERT or UPSERT operation into the
+ IDENTITY column of a table results in a SQL exception.
+
+ '
+ batchSize:
+ type: integer
+ description: 'Default `500`. Specifies how many records
+ to attempt to batch together into the destination
+ table.
+ > Note that if you set `consumerMaxPollRecords` in
+ the Connect worker properties to a value lower than
+ `batchSize`, batch processing will be capped by `consumerMaxPollRecords`
+ and the desired `batchSize` won’t be reached. You
+ can also configure the connector’s underlying consumer’s
+ `maxPollRecords` using `consumerOverrideMaxPollRecords`
+ in the connector configuration.
+
+ '
+ columnNamingStrategy:
+ type: string
+ description: 'Default `io.debezium.connector.jdbc.naming.DefaultColumnNamingStrategy`.
+ Specifies the fully-qualified class name of a ColumnNamingStrategy
+ implementation that the connector uses to resolve
+ column names from event field names.
+
+ By default, the connector uses the field name as the
+ column name.
+
+ '
+ tableNamingStrategy:
+ type: string
+ description: 'Default `io.stackgres.stream.jobs.migration.StreamMigrationTableNamingStrategy`.
+ Specifies the fully-qualified class name of a TableNamingStrategy
+ implementation that the connector uses to resolve
+ table names from incoming event topic names.
+
+ The default behavior is to:
+
+ * Replace the ${topic} placeholder in the `tableNameFormat`
+ configuration property with the event’s topic.
+ + * Sanitize the table name by replacing dots (`.`) + with underscores (`_`). + + ' + maxRetries: + type: integer + description: 'The maximum number of retries the streaming operation + is allowed to do after a failure. + + + A value of `0` (zero) means no retries are made. A value of `-1` + means retries are unlimited. Defaults to: `-1`. + + ' + pods: + type: object + description: The configuration for SGStream Pod + required: + - persistentVolume + properties: + persistentVolume: + type: object + description: "Pod's persistent volume configuration.\n\n**Example:**\n\ + \n```yaml\napiVersion: stackgres.io/v1\nkind: SGCluster\n\ + metadata:\n name: stackgres\nspec:\n pods:\n persistentVolume:\n\ + \ size: '5Gi'\n storageClass: default\n```\n" + required: + - size + properties: + size: + type: string + pattern: ^[0-9]+(\.[0-9]+)?(Mi|Gi|Ti)$ + description: 'Size of the PersistentVolume for stream Pod. + This size is specified either in Mebibytes, Gibibytes + or Tebibytes (multiples of 2^20, 2^30 or 2^40, respectively). + + ' + storageClass: + type: string + description: 'Name of an existing StorageClass in the Kubernetes + cluster, used to create the PersistentVolume for stream. + + ' + resources: + type: object + description: 'The resources assigned to the stream container. + + + See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + + ' + properties: + claims: + description: 'Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + + + This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. + + + This field is immutable. It can only be set for containers.' + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + limits: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String() and AsInt64()\ + \ accessors.\n\nThe serialization format is:\n\n```\ + \ ::= \n\n\t\ + (Note that may be empty, from the \"\" case\ + \ in .)\n\n ::= 0 | 1 |\ + \ ... | 9 ::= | \ + \ ::= | .\ + \ | . | . ::= \"+\"\ + \ | \"-\" ::= | \ + \ ::= | \ + \ | ::= Ki | Mi | Gi |\ + \ Ti | Pi | Ei\n\n\t(International System of units;\ + \ See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P\ + \ | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't\ + \ choose the capitalization.)\n\n ::=\ + \ \"e\" | \"E\" ```\n\n\ + No matter which of the three exponent forms is used,\ + \ no quantity may represent a number greater than 2^63-1\ + \ in magnitude, nor may it have more than 3 decimal\ + \ places. Numbers larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we require larger\ + \ or smaller quantities.\n\nWhen a Quantity is parsed\ + \ from a string, it will remember the type of suffix\ + \ it had, and will use the same type again when it is\ + \ serialized.\n\nBefore serializing, Quantity will be\ + \ put in \"canonical form\". 
This means that Exponent/suffix\ + \ will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n-\ + \ No precision is lost - No fractional digits will be\ + \ emitted - The exponent (or suffix) is as large as\ + \ possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized\ + \ as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER be internally\ + \ represented by a floating point number. That is the\ + \ whole point of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical form. (So\ + \ always use canonical form, or don't diff.)\n\nThis\ + \ format is intended to make it difficult to use these\ + \ numbers without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + description: "Quantity is a fixed-point representation\ + \ of a number. It provides convenient marshaling/unmarshaling\ + \ in JSON and YAML, in addition to String() and AsInt64()\ + \ accessors.\n\nThe serialization format is:\n\n```\ + \ ::= \n\n\t\ + (Note that may be empty, from the \"\" case\ + \ in .)\n\n ::= 0 | 1 |\ + \ ... | 9 ::= | \ + \ ::= | .\ + \ | . | . ::= \"+\"\ + \ | \"-\" ::= | \ + \ ::= | \ + \ | ::= Ki | Mi | Gi |\ + \ Ti | Pi | Ei\n\n\t(International System of units;\ + \ See: http://physics.nist.gov/cuu/Units/binary.html)\n\ + \n ::= m | \"\" | k | M | G | T | P\ + \ | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't\ + \ choose the capitalization.)\n\n ::=\ + \ \"e\" | \"E\" ```\n\n\ + No matter which of the three exponent forms is used,\ + \ no quantity may represent a number greater than 2^63-1\ + \ in magnitude, nor may it have more than 3 decimal\ + \ places. Numbers larger or more precise will be capped\ + \ or rounded up. (E.g.: 0.1m will rounded up to 1m.)\ + \ This may be extended in the future if we require larger\ + \ or smaller quantities.\n\nWhen a Quantity is parsed\ + \ from a string, it will remember the type of suffix\ + \ it had, and will use the same type again when it is\ + \ serialized.\n\nBefore serializing, Quantity will be\ + \ put in \"canonical form\". This means that Exponent/suffix\ + \ will be adjusted up or down (with a corresponding\ + \ increase or decrease in Mantissa) such that:\n\n-\ + \ No precision is lost - No fractional digits will be\ + \ emitted - The exponent (or suffix) is as large as\ + \ possible.\n\nThe sign will be omitted unless the number\ + \ is negative.\n\nExamples:\n\n- 1.5 will be serialized\ + \ as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\ + \n\nNote that the quantity will NEVER be internally\ + \ represented by a floating point number. That is the\ + \ whole point of this exercise.\n\nNon-canonical values\ + \ will still parse as long as they are well formed,\ + \ but will be re-emitted in their canonical form. 
(So\ + \ always use canonical form, or don't diff.)\n\nThis\ + \ format is intended to make it difficult to use these\ + \ numbers without writing some sort of special handling\ + \ code in the hopes that that will cause implementors\ + \ to also use a fixed point implementation." + type: string + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + scheduling: + type: object + description: 'Pod custom scheduling, affinity and topology spread + constratins configuration. + + ' + properties: + nodeSelector: + type: object + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the pod to fit on a node. Selector which must match + a node''s labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + + ' + tolerations: + description: 'If specified, the pod''s tolerations. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#toleration-v1-core' + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple + using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to + match. Empty means match all taint effects. When + specified, allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If + the key is empty, operator must be Exists; this + combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints + of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect + NoExecute, otherwise this field is ignored) tolerates + the taint. By default, it is not set, which means + tolerate the taint forever (do not evict). Zero + and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value + should be empty, otherwise just a regular string. + type: string + type: object + type: array + nodeAffinity: + description: 'Node affinity is a group of node affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#nodeaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. 
for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a + no-op). A null preferred scheduling term matches + no objects (i.e. is also a no-op). + properties: + preference: + description: A null or empty node selector term + matches no objects. The requirements of them + are ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range + 1-100. + format: int32 + type: integer + required: + - weight + - preference + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: A node selector represents the union of + the results of one or more label queries over a set + of nodes; that is, it represents the OR of the selectors + represented by the node selector terms. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them + are ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. 
+ properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + priorityClassName: + description: If specified, indicates the pod's priority. + "system-node-critical" and "system-cluster-critical" are + two special keywords which indicate the highest priorities + with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object + with that name. If not specified, the pod priority will + be default or zero if there is no default. + type: string + podAffinity: + description: 'Pod affinity is a group of inter pod affinity + scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the + corresponding podAffinityTerm; the node(s) with the + highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. 
+ Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range + 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a + pod label update), the system may or may not try to + eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all + terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. 
+ items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: 'Pod anti affinity is a group of inter pod + anti affinity scheduling rules. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podantiaffinity-v1-core' + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the greatest + sum of weights, i.e. for each node that meets all + of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the + node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod + label keys to select which pods will be + taken into consideration. The keys are used + to lookup values from the incoming pod labels, + those key-value labels are merged with `LabelSelector` + as `key in (value)` to select the group + of existing pods which pods will be taken + into consideration for the incoming pod's + pod (anti) affinity. Keys that don't exist + in the incoming pod labels will be ignored. + The default value is empty. The same key + is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys + cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling + MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of + pod label keys to select which pods will + be taken into consideration. The keys are + used to lookup values from the incoming + pod labels, those key-value labels are merged + with `LabelSelector` as `key notin (value)` + to select the group of existing pods which + pods will be taken into consideration for + the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod + labels will be ignored. The default value + is empty. The same key is forbidden to exist + in both MismatchLabelKeys and LabelSelector. 
+ Also, MismatchLabelKeys cannot be set when + LabelSelector isn't set. This is an alpha + field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty + label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range + 1-100. + format: int32 + type: integer + required: + - weight + - podAffinityTerm + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a + pod label update), the system may or may not try to + eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all + terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select which pods will be taken into + consideration. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + in (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys + and LabelSelector. Also, MatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. + items: + type: string + type: array + mismatchLabelKeys: + description: MismatchLabelKeys is a set of pod + label keys to select which pods will be taken + into consideration. The keys are used to lookup + values from the incoming pod labels, those key-value + labels are merged with `LabelSelector` as `key + notin (value)` to select the group of existing + pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. + Keys that don't exist in the incoming pod labels + will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys + and LabelSelector. Also, MismatchLabelKeys cannot + be set when LabelSelector isn't set. This is + an alpha field and requires enabling MatchLabelKeysInPodAffinity + feature gate. 
+ items: + type: string + type: array + namespaceSelector: + description: A label selector is a label query + over a set of resources. The result of matchLabels + and matchExpressions are ANDed. An empty label + selector matches all objects. A null label selector + matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + topologySpreadConstraints: + description: 'TopologySpreadConstraints describes how a + group of pods ought to spread across topology domains. + Scheduler will schedule pods in a way which abides by + the constraints. All topologySpreadConstraints are ANDed. + + + See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#topologyspreadconstraint-v1-core' + items: + description: TopologySpreadConstraint specifies how to + spread matching pods among the given topology. + properties: + labelSelector: + description: A label selector is a label query over + a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector + matches all objects. A null label selector matches + no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. 
+ type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: 'MatchLabelKeys is a set of pod label + keys to select the pods over which spreading will + be calculated. The keys are used to lookup values + from the incoming pod labels, those key-value labels + are ANDed with labelSelector to select the group + of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden + to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector + isn''t set. Keys that don''t exist in the incoming + pod labels will be ignored. A null or empty list + means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default).' + items: + type: string + type: array + maxSkew: + description: 'MaxSkew describes the degree to which + pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the + number of matching pods in the target topology and + the global minimum. The global minimum is the minimum + number of matching pods in an eligible domain or + zero if the number of eligible domains is less than + MinDomains. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum + is 1. | zone1 | zone2 | zone3 | | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled + to zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can + be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies + that satisfy it. It''s a required field. Default + value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: 'MinDomains indicates a minimum number + of eligible domains. When the number of eligible + domains with matching topology keys is less than + minDomains, Pod Topology Spread treats "global minimum" + as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching + topology keys equals or greater than minDomains, + this value has no effect on scheduling. As a result, + when the number of eligible domains is less than + minDomains, scheduler won''t schedule more than + maxSkew Pods to those domains. If value is nil, + the constraint behaves as if MinDomains is equal + to 1. Valid values are integers greater than 0. 
+ When value is not nil, WhenUnsatisfiable must be + DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set + to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: | zone1 | zone2 | + zone3 | | P P | P P | P P | The number of + domains is less than 5(MinDomains), so "global minimum" + is treated as 0. In this situation, new pod with + the same labelSelector cannot be scheduled, because + computed skew will be 3(3 - 0) if new Pod is scheduled + to any of the three zones, it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread + feature gate to be enabled (enabled by default).' + format: int32 + type: integer + nodeAffinityPolicy: + description: 'NodeAffinityPolicy indicates how we + will treat Pod''s nodeAffinity/nodeSelector when + calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent + to the Honor policy. This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + type: string + nodeTaintsPolicy: + description: 'NodeTaintsPolicy indicates how we will + treat node taints when calculating pod topology + spread skew. Options are: - Honor: nodes without + taints, along with tainted nodes for which the incoming + pod has a toleration, are included. - Ignore: node + taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent + to the Ignore policy. This is a beta-level feature + default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag.' + type: string + topologyKey: + description: TopologyKey is the key of node labels. + Nodes that have a label with this key and identical + values are considered to be in the same topology. + We consider each as a "bucket", and + try to put balanced number of pods into each bucket. + We define a domain as a particular instance of a + topology. Also, we define an eligible domain as + a domain whose nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if + TopologyKey is "topology.kubernetes.io/zone", each + zone is a domain of that topology. It's a required + field. + type: string + whenUnsatisfiable: + description: "WhenUnsatisfiable indicates how to deal\ + \ with a pod if it doesn't satisfy the spread constraint.\ + \ - DoNotSchedule (default) tells the scheduler\ + \ not to schedule it. - ScheduleAnyway tells the\ + \ scheduler to schedule the pod in any location,\n\ + \ but giving higher precedence to topologies that\ + \ would help reduce the\n skew.\nA constraint is\ + \ considered \"Unsatisfiable\" for an incoming pod\ + \ if and only if every possible node assignment\ + \ for that pod would violate \"MaxSkew\" on some\ + \ topology. 
For example, in a 3-zone cluster, MaxSkew\ + \ is set to 1, and pods with the same labelSelector\ + \ spread as 3/1/1: | zone1 | zone2 | zone3 | | P\ + \ P P | P | P | If WhenUnsatisfiable is\ + \ set to DoNotSchedule, incoming pod can only be\ + \ scheduled to zone2(zone3) to become 3/2/1(3/1/2)\ + \ as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1).\ + \ In other words, the cluster can still be imbalanced,\ + \ but scheduler won't make it *more* imbalanced.\ + \ It's a required field." + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + debeziumEngineProperties: + type: object + description: "See https://debezium.io/documentation/reference/stable/development/engine.html#engine-properties\n\ + \ Each property is converted from myPropertyName to my.property.name\n" + properties: + offsetCommitPolicy: + type: string + description: 'Default `io.debezium.engine.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy`. + The name of the Java class of the commit policy. It defines + when offsets commit has to be triggered based on the number + of events processed and the time elapsed since the last commit. + This class must implement the interface OffsetCommitPolicy. + The default is a periodic commit policy based upon time intervals. + + ' + offsetFlushIntervalMs: + type: integer + description: 'Default `60000`. Interval at which to try committing + offsets. The default is 1 minute. + + ' + offsetFlushTimeoutMs: + type: integer + description: 'Default `5000`. Maximum number of milliseconds + to wait for records to flush and partition offset data to + be committed to offset storage before cancelling the process + and restoring the offset data to be committed in a future + attempt. The default is 5 seconds. + + ' + errorsMaxRetries: + type: integer + description: 'Default `-1`. The maximum number of retries on + connection errors before failing (-1 = no limit, 0 = disabled, + > 0 = num of retries). + + ' + errorsRetryDelayInitialMs: + type: integer + description: 'Default `300`. Initial delay (in ms) for retries + when encountering connection errors. This value will be doubled + upon every retry but won’t exceed errorsRetryDelayMaxMs. + + ' + errorsRetryDelayMaxMs: + type: integer + description: 'Default `10000`. Max delay (in ms) between retries + when encountering connection errors. + + ' + transforms: + type: object + description: "Before the messages are delivered to the handler\ + \ it is possible to run them through a pipeline of Kafka Connect\ + \ Simple Message Transforms (SMT). Each SMT can pass the message\ + \ unchanged, modify it or filter it out. The chain is configured\ + \ using property transforms. The property contains a list\ + \ of logical names of the transformations to be applied (the\ + \ specified keys). Properties transforms.<logical_name>.type\ + \ then defines the name of the implementation class for each\ + \ transformation and transforms.<logical_name>.* configuration\ + \ options that are passed to the transformation.\n\nAn example\ + \ of the configuration is:\n\n```\ntransforms: # (1)\n router:\n\ + \ type: \"org.apache.kafka.connect.transforms.RegexRouter\"\ + \ # (2)\n regex: \"(.*)\" # (3)\n replacement: \"trf$1\"\ + \ # (3)\n filter:\n type: \"io.debezium.embedded.ExampleFilterTransform\"\ + \ # (4)\n```\n\n1. Two transformations are defined - filter\ + \ and router\n2. Implementation of the router transformation\ + \ is org.apache.kafka.connect.transforms.RegexRouter\n3.
The\ + \ router transformation has two configuration options - regex\ + \ and replacement\n4. Implementation of the filter transformation\ + \ is io.debezium.embedded.ExampleFilterTransform\n" + additionalProperties: + type: object + description: The properties of this transformation. + additionalProperties: + type: string + predicates: + type: object + description: "Predicates can be applied to transformations to\ + \ make the transformations optional.\n\nAn example of the\ + \ configuration is:\n\n```\npredicates:\n headerExists: #\ + \ (1)\n type: \"org.apache.kafka.connect.transforms.predicates.HasHeaderKey\"\ + \ # (2)\n name: \"header.name\" # (3)\ntransforms:\n filter:\ + \ # (4)\n type: \"io.debezium.embedded.ExampleFilterTransform\"\ + \ # (5)\n predicate: \"headerExists\" # (6)\n negate:\ + \ \"true\" # (7)\n```\n\n1. One predicate is defined - headerExists\n\ + 2. Implementation of the headerExists predicate is org.apache.kafka.connect.transforms.predicates.HasHeaderKey\n\ + 3. The headerExists predicate has one configuration option\ + \ - name\n4. One transformation is defined - filter\n5. Implementation\ + \ of the filter transformation is io.debezium.embedded.ExampleFilterTransform\n\ + 6. The filter transformation requires the predicate headerExists\n\ + 7. The filter transformation expects the value of the predicate\ + \ to be negated, making the predicate determine if the header\ + \ does not exist\n" + additionalProperties: + type: object + description: The properties of this predicate. + additionalProperties: + type: string + status: + type: object + description: 'Status of a StackGres stream. + + ' + properties: + conditions: + type: array + description: 'Possible conditions are: + + + * Running: to indicate when the operation is actually running + + * Completed: to indicate when the operation has completed successfully + + * Failed: to indicate when the operation has failed + + ' + items: + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one + status to another. + type: string + message: + description: A human-readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition last transition. + type: string + status: + description: Status of the condition, one of `True`, `False` + or `Unknown`. + type: string + type: + description: Type of deployment condition. + type: string + snapshot: + type: object + description: Snapshot status + properties: + lastEvent: + type: string + description: 'The last snapshot event that the connector has + read. + + ' + milliSecondsSinceLastEvent: + type: integer + description: 'The number of milliseconds since the connector + has read and processed the most recent event. + + ' + totalNumberOfEventsSeen: + type: integer + description: 'The total number of events that this connector + has seen since last started or reset. + + ' + numberOfEventsFiltered: + type: integer + description: 'The number of events that have been filtered by + include/exclude list filtering rules configured on the connector. + + ' + capturedTables: + type: array + items: + type: string + description: 'The list of tables that are captured by the connector. + + ' + queueTotalCapacity: + type: integer + description: 'The length of the queue used to pass events between + the snapshotter and the main Kafka Connect loop.
+ + ' + queueRemainingCapacity: + type: integer + description: 'The free capacity of the queue used to pass events + between the snapshotter and the main Kafka Connect loop. + + ' + totalTableCount: + type: integer + description: 'The total number of tables that are being included + in the snapshot. + + ' + remainingTableCount: + type: integer + description: 'The number of tables that the snapshot has yet + to copy. + + ' + snapshotRunning: + type: boolean + description: 'Whether the snapshot was started. + + ' + snapshotPaused: + type: boolean + description: 'Whether the snapshot was paused. + + ' + snapshotAborted: + type: boolean + description: 'Whether the snapshot was aborted. + + ' + snapshotCompleted: + type: boolean + description: 'Whether the snapshot completed. + + ' + snapshotDurationInSeconds: + type: integer + description: 'The total number of seconds that the snapshot + has taken so far, even if not complete. Includes also time + when snapshot was paused. + + ' + snapshotPausedDurationInSeconds: + type: integer + description: 'The total number of seconds that the snapshot + was paused. If the snapshot was paused several times, the + paused time adds up. + + ' + rowsScanned: + type: object + additionalProperties: + type: integer + description: 'Map containing the number of rows scanned for + each table in the snapshot. Tables are incrementally added + to the Map during processing. Updates every 10,000 rows scanned + and upon completing a table. + + ' + maxQueueSizeInBytes: + type: integer + description: 'The maximum buffer of the queue in bytes. This + metric is available if max.queue.size.in.bytes is set to a + positive long value. + + ' + currentQueueSizeInBytes: + type: integer + description: 'The current volume, in bytes, of records in the + queue. + + ' + chunkId: + type: string + description: 'The identifier of the current snapshot chunk. + + ' + chunkFrom: + type: string + description: 'The lower bound of the primary key set defining + the current chunk. + + ' + chunkTo: + type: string + description: 'The upper bound of the primary key set defining + the current chunk. + + ' + tableFrom: + type: string + description: 'The lower bound of the primary key set of the + currently snapshotted table. + + ' + tableTo: + type: string + description: 'The upper bound of the primary key set of the + currently snapshotted table. + + ' + streaming: + type: object + description: Streaming status + properties: + lastEvent: + type: string + description: 'The last streaming event that the connector has + read. + + ' + milliSecondsSinceLastEvent: + type: integer + description: 'The number of milliseconds since the connector + has read and processed the most recent event. + + ' + totalNumberOfEventsSeen: + type: integer + description: 'The total number of events that this connector + has seen since the last start or metrics reset. + + ' + totalNumberOfCreateEventsSeen: + type: integer + description: 'The total number of create events that this connector + has seen since the last start or metrics reset. + + ' + totalNumberOfUpdateEventsSeen: + type: integer + description: 'The total number of update events that this connector + has seen since the last start or metrics reset. + + ' + totalNumberOfDeleteEventsSeen: + type: integer + description: 'The total number of delete events that this connector + has seen since the last start or metrics reset. 
+ + ' + numberOfEventsFiltered: + type: integer + description: 'The number of events that have been filtered by + include/exclude list filtering rules configured on the connector. + + ' + capturedTables: + type: array + items: + type: string + description: 'The list of tables that are captured by the connector. + + ' + queueTotalCapacity: + type: integer + description: 'The length of the queue used to pass events between + the streamer and the main Kafka Connect loop. + + ' + queueRemainingCapacity: + type: integer + description: 'The free capacity of the queue used to pass events + between the streamer and the main Kafka Connect loop. + + ' + connected: + type: boolean + description: 'Flag that denotes whether the connector is currently + connected to the database server. + + ' + milliSecondsBehindSource: + type: integer + description: 'The number of milliseconds between the last change + event’s timestamp and the connector processing it. The values + will incorporate any differences between the clocks on the + machines where the database server and the connector are running. + + ' + numberOfCommittedTransactions: + type: integer + description: 'The number of processed transactions that were + committed. + + ' + sourceEventPosition: + type: object + additionalProperties: + type: string + description: 'The coordinates of the last received event. + + ' + lastTransactionId: + type: string + description: 'Transaction identifier of the last processed transaction. + + ' + maxQueueSizeInBytes: + type: integer + description: 'The maximum buffer of the queue in bytes. This + metric is available if max.queue.size.in.bytes is set to a + positive long value. + + ' + currentQueueSizeInBytes: + type: integer + description: 'The current volume, in bytes, of records in the + queue. + + ' + events: + type: object + description: Events status + properties: + lastEventWasSent: + type: boolean + description: 'It is true if the last event that the stream has + tried to send since the last start or metrics reset was sent + successfully. + + ' + lastEventSent: + type: string + description: 'The last event that the stream has sent since + the last start or metrics reset. + + ' + totalNumberOfEventsSent: + type: integer + description: 'The total number of events that this stream has + sent since the last start or metrics reset. + + ' + lastErrorSeen: + type: string + description: 'The last error seen sending events that this stream + has seen since the last start or metrics reset. + + ' + totalNumberOfErrorsSeen: + type: integer + description: 'The total number of errors sending events that + this stream has seen since the last start or metrics reset.
+ + ' + failure: + type: string + description: The failure message diff --git a/operators/stackgres/1.15.0-rc1/metadata/annotations.yaml b/operators/stackgres/1.15.0-rc1/metadata/annotations.yaml new file mode 100644 index 00000000000..606d45e9575 --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/metadata/annotations.yaml @@ -0,0 +1,12 @@ +annotations: + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: stackgres + operators.operatorframework.io.bundle.channels.v1: candidate,fast + operators.operatorframework.io.bundle.channel.default.v1: candidate + operators.operatorframework.io.metrics.builder: operator-sdk-v1.32.0 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: quarkus.javaoperatorsdk.io/v1-alpha + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ diff --git a/operators/stackgres/1.15.0-rc1/tests/scorecard/config.yaml b/operators/stackgres/1.15.0-rc1/tests/scorecard/config.yaml new file mode 100644 index 00000000000..a13269842bd --- /dev/null +++ b/operators/stackgres/1.15.0-rc1/tests/scorecard/config.yaml @@ -0,0 +1,70 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: + - parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.26.0 + labels: + suite: basic + test: basic-check-spec-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.26.0 + labels: + suite: olm + test: olm-bundle-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.26.0 + labels: + suite: olm + test: olm-crds-have-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.26.0 + labels: + suite: olm + test: olm-crds-have-resources-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.26.0 + labels: + suite: olm + test: olm-spec-descriptors-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.26.0 + labels: + suite: olm + test: olm-status-descriptors-test + storage: + spec: + mountPath: {} +storage: + spec: + mountPath: {}
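For context on the files above: annotations.yaml declares the package name (stackgres) and its channels (candidate and fast, with candidate as the default), and the scorecard config.yaml defines the basic and olm test suites that `operator-sdk scorecard` can run against this bundle directory. As a hedged illustration of how that package and channel metadata is consumed once the bundle is published to a catalog, a minimal OLM Subscription might look like the sketch below; the target namespace, the CatalogSource name, and the CatalogSource namespace are assumptions that depend on the cluster and on where the catalog is published.

```
# Illustrative sketch only -- not part of this bundle.
# metadata.namespace, spec.source, and spec.sourceNamespace are assumptions.
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: stackgres
  namespace: operators            # assumed target namespace
spec:
  name: stackgres                 # package name from annotations.yaml
  channel: candidate              # default channel; "fast" is also declared
  source: operatorhubio-catalog   # assumed CatalogSource name
  sourceNamespace: olm            # assumed CatalogSource namespace
  installPlanApproval: Automatic
```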
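Similarly, the `debeziumEngineProperties` section of the SGStream CRD earlier in this diff states that each camelCase key is converted to Debezium's dotted property name (myPropertyName becomes my.property.name). The fragment below is a minimal, illustrative sketch of that mapping; it is not part of the bundle, the `apiVersion` and object name are assumptions, and the rest of the SGStream spec (source and target configuration) is elided.

```
# Illustrative sketch only -- not part of this bundle.
# apiVersion and metadata.name are assumptions; required source/target
# sections of the SGStream spec are elided.
apiVersion: stackgres.io/v1alpha1
kind: SGStream
metadata:
  name: example-stream
spec:
  debeziumEngineProperties:
    # camelCase keys map to Debezium's dotted properties, e.g.
    #   offsetFlushIntervalMs     -> offset.flush.interval.ms
    #   errorsRetryDelayInitialMs -> errors.retry.delay.initial.ms
    offsetFlushIntervalMs: 30000   # commit offsets every 30 s instead of the 60000 ms default
    errorsMaxRetries: 5            # retry connection errors up to 5 times (-1 = no limit)
    transforms:
      router:                      # logical name; yields transforms.router.* properties
        type: "org.apache.kafka.connect.transforms.RegexRouter"
        regex: "(.*)"
        replacement: "trf$1"
```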